From fafd246d7c7c041cd6a1ed1b4a057456ff858914 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 26 May 2022 23:47:49 +0800 Subject: [PATCH 001/164] Add vTaskYieldWithinAPI for taskYIELD_IF_USING_PREEMPTION * Add configNUM_CORES config for SMP * Add portGET_CORE_ID porting config and default return 0 to compatible with single core demos * Replace xYieldPending with xYieldPendings for multiple cores * Add vTaskYieldWithinAPI function for yield pending if the task is in criticial section. This check are enabled only when portCRITICAL_NESTING_IN_TCB is enabled * taskYIELD_IF_USING_PREEMPTION use vTaskYieldWithinAPI when configUSE_PREEMPTION is set to 1 The following sections will be updated in other commits * taskYIELD_IF_USING_PREEMPTION usage in multiple cores * xYieldPendings usage in multiple cores --- include/FreeRTOS.h | 15 +++++++++- include/task.h | 5 ++++ tasks.c | 72 +++++++++++++++++++++++++++++++++++----------- 3 files changed, 74 insertions(+), 18 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 74872cfdef6..52f07d5695d 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -287,6 +287,20 @@ #define portSOFTWARE_BARRIER() #endif +#ifndef configNUM_CORES + #define configNUM_CORES 1 +#endif + +#ifndef portGET_CORE_ID + + #if configNUM_CORES == 1 + #define portGET_CORE_ID() 0 + #else + #error configNUM_CORES is set to more than 1 then portGET_CORE_ID must also be defined. + #endif /* configNUM_CORES */ + +#endif /* portGET_CORE_ID */ + /* The timers module relies on xTaskGetSchedulerState(). */ #if configUSE_TIMERS == 1 @@ -1068,7 +1082,6 @@ #define configRUN_ADDITIONAL_TESTS 0 #endif - /* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using * dynamically allocated RAM, in which case when any task is deleted it is known * that both the task's stack and TCB need to be freed. Sometimes the diff --git a/include/task.h b/include/task.h index a4e959bc6e3..a07359e1935 100644 --- a/include/task.h +++ b/include/task.h @@ -3109,6 +3109,11 @@ TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION; */ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; +/* + * For internal use only. Same as portYIELD_WITHIN_API() in single core FreeRTOS. + * For SMP this is not defined by the port. + */ +void vTaskYieldWithinAPI( void ); /* *INDENT-OFF* */ #ifdef __cplusplus diff --git a/tasks.c b/tasks.c index 82ad273ddd9..c535955f084 100644 --- a/tasks.c +++ b/tasks.c @@ -64,7 +64,7 @@ * performed just because a higher priority task has been woken. */ #define taskYIELD_IF_USING_PREEMPTION() #else - #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #define taskYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() #endif /* Values that can be assigned to the ucNotifyState member of the TCB. 
*/ @@ -375,7 +375,7 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINI PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; -PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; +PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUM_CORES ] = { pdFALSE }; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ @@ -1183,7 +1183,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * after which it is not possible to yield away from this task - * hence xYieldPending is used to latch that a context switch is * required. */ - portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending ); + /* SMP_TODO : The task deleted not necessary running on the CPU. Fix + * this with pxTCB->xTaskRunState. */ + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ portGET_CORE_ID() ] ); } else { @@ -1933,7 +1935,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* Mark that a yield is pending in case the user is not * using the return value to initiate a context switch * from the ISR using portYIELD_FROM_ISR. */ - xYieldPending = pdTRUE; + /* SMP_TODO : Fix this when reviewing other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } else { @@ -2222,7 +2225,8 @@ BaseType_t xTaskResumeAll( void ) * the current task then a yield must be performed. */ if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) { - xYieldPending = pdTRUE; + /* SMP_TODO : Fix this when reviewing other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } else { @@ -2254,7 +2258,8 @@ BaseType_t xTaskResumeAll( void ) { if( xTaskIncrementTick() != pdFALSE ) { - xYieldPending = pdTRUE; + /* SMP_TODO : Fix this when reviewing other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } else { @@ -2272,7 +2277,8 @@ BaseType_t xTaskResumeAll( void ) } } - if( xYieldPending != pdFALSE ) + /* SMP_TODO : Fix this when reviewing other commit. */ + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { #if ( configUSE_PREEMPTION != 0 ) { @@ -2708,7 +2714,8 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) { /* Pend the yield to be performed when the scheduler * is unsuspended. */ - xYieldPending = pdTRUE; + /* SMP_TODO : Fix this with other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } else { @@ -2875,7 +2882,8 @@ BaseType_t xTaskIncrementTick( void ) #if ( configUSE_PREEMPTION == 1 ) { - if( xYieldPending != pdFALSE ) + /* SMP_TODO : fix this in other commit. */ + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { xSwitchRequired = pdTRUE; } @@ -3021,11 +3029,13 @@ void vTaskSwitchContext( void ) { /* The scheduler is currently suspended - do not allow a context * switch. */ - xYieldPending = pdTRUE; + /* SMP_TODO : fix this with other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } else { - xYieldPending = pdFALSE; + /* SMP_TODO : fix this with other commit. 
*/ + xYieldPendings[ portGET_CORE_ID() ] = pdFALSE; traceTASK_SWITCHED_OUT(); #if ( configGENERATE_RUN_TIME_STATS == 1 ) @@ -3234,7 +3244,8 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) /* Mark that a yield is pending in case the user is not using the * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ - xYieldPending = pdTRUE; + /* SMP_TODO : fix this with other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } else { @@ -3289,7 +3300,8 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, * a context switch is required. This function is called with the * scheduler suspended so xYieldPending is set so the context switch * occurs immediately that the scheduler is resumed (unsuspended). */ - xYieldPending = pdTRUE; + /* SMP_TODO : fix this with other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } } /*-----------------------------------------------------------*/ @@ -3381,7 +3393,8 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, void vTaskMissedYield( void ) { - xYieldPending = pdTRUE; + /* SMP_TODO : fix this with other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } /*-----------------------------------------------------------*/ @@ -3570,7 +3583,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) /* A task was made ready while the scheduler was suspended. */ eReturn = eAbortSleep; } - else if( xYieldPending != pdFALSE ) + /* SMP_TODO : fix this with other commit. */ + else if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { /* A yield was pended while the scheduler was suspended. */ eReturn = eAbortSleep; @@ -4349,6 +4363,28 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* configUSE_MUTEXES */ /*-----------------------------------------------------------*/ +/* + * If not in a critical section then yield immediately. + * Otherwise set xYieldPendings to true to wait to + * yield until exiting the critical section. + */ +void vTaskYieldWithinAPI( void ) +{ + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portYIELD_WITHIN_API(); + } + else + { + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + } + #else + portYIELD_WITHIN_API(); + #endif +} +/*-----------------------------------------------------------*/ + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) void vTaskEnterCritical( void ) @@ -5109,7 +5145,8 @@ TickType_t uxTaskResetEventItemValue( void ) /* Mark that a yield is pending in case the user is not * using the "xHigherPriorityTaskWoken" parameter to an ISR * safe FreeRTOS function. */ - xYieldPending = pdTRUE; + /* SMP_TODO : Fix this in other commit. */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } else { @@ -5200,7 +5237,8 @@ TickType_t uxTaskResetEventItemValue( void ) /* Mark that a yield is pending in case the user is not * using the "xHigherPriorityTaskWoken" parameter in an ISR * safe FreeRTOS function. */ - xYieldPending = pdTRUE; + /* SMP_TODO : Fix this in other commit. 
*/ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } else { From 14c3b6ebda77fbab61b4261ea1a08a070d95eda3 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 2 Jun 2022 16:45:09 +0800 Subject: [PATCH 002/164] Use portYIELD_WITHIN_API for portYIELD_WITHIN_API for single core --- include/FreeRTOS.h | 6 ++++++ tasks.c | 22 +++++++++++++--------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 52f07d5695d..c955dce754d 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -291,6 +291,12 @@ #define configNUM_CORES 1 #endif +#if ( configNUM_CORES > 1 ) + #if portCRITICAL_NESTING_IN_TCB == 0 + #error portCRITICAL_NESTING_IN_TCB is required in SMP + #endif +#endif + #ifndef portGET_CORE_ID #if configNUM_CORES == 1 diff --git a/tasks.c b/tasks.c index c535955f084..593ea2cf66d 100644 --- a/tasks.c +++ b/tasks.c @@ -64,7 +64,11 @@ * performed just because a higher priority task has been woken. */ #define taskYIELD_IF_USING_PREEMPTION() #else - #define taskYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() + #if configNUM_CORES == 1 + #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #else + #define taskYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() + #endif #endif /* Values that can be assigned to the ucNotifyState member of the TCB. */ @@ -4363,26 +4367,26 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* configUSE_MUTEXES */ /*-----------------------------------------------------------*/ +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + /* * If not in a critical section then yield immediately. * Otherwise set xYieldPendings to true to wait to * yield until exiting the critical section. */ -void vTaskYieldWithinAPI( void ) -{ - #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + void vTaskYieldWithinAPI( void ) + { if( pxCurrentTCB->uxCriticalNesting == 0U ) { - portYIELD_WITHIN_API(); + portYIELD(); } else { xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } - #else - portYIELD_WITHIN_API(); - #endif -} + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ /*-----------------------------------------------------------*/ #if ( portCRITICAL_NESTING_IN_TCB == 1 ) From 2b6d163ccb60b6cc718c194a58956a9bcb66bec6 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 1 Jun 2022 09:53:51 +0800 Subject: [PATCH 003/164] Add xTaskRunState and xIsIdle in TCB * Add xTaskRunState and xIsIdle in TCB * Use xTaskAttribute to replace the xIsIdle in SMP TCB --- include/FreeRTOS.h | 3 +++ tasks.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index c955dce754d..2287c7a8ac7 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1208,6 +1208,9 @@ typedef struct xSTATIC_TCB StaticListItem_t xDummy3[ 2 ]; UBaseType_t uxDummy5; void * pxDummy6; + #if ( configNUM_CORES > 1 ) + BaseType_t xDummy23[ 2 ]; + #endif uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; diff --git a/tasks.c b/tasks.c index 593ea2cf66d..0c4fd5caf23 100644 --- a/tasks.c +++ b/tasks.c @@ -251,6 +251,20 @@ #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL #endif +/* Indicates that the task is not actively running on any core. */ +#define taskTASK_NOT_RUNNING ( TaskRunning_t ) ( -1 ) + +/* Indicates that the task is actively running but scheduled to yield. */ +#define taskTASK_YIELDING ( TaskRunning_t ) ( -2 ) + +/* Returns pdTRUE if the task is actively running and not scheduled to yield. 
*/ +#define taskTASK_IS_RUNNING( xTaskRunState ) ( ( 0 <= xTaskRunState ) && ( xTaskRunState < configNUM_CORES ) ) + +/* Indicates that the task is an Idle task. */ +#define taskATTRIBUTE_IS_IDLE ( 1UL << 0 ) + +typedef BaseType_t TaskRunning_t; + /* * Task control block. A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -268,6 +282,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ StackType_t * pxStack; /*< Points to the start of the stack. */ + #if ( configNUM_CORES > 1 ) + volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if any. */ + BaseType_t xTaskAttribute; /*< Used to identify the idle tasks. */ + #endif char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) @@ -1029,6 +1047,21 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* portUSING_MPU_WRAPPERS */ + /* Initialize to not running. */ + #if ( configNUM_CORES > 1 ) + pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; + + /* Is this an idle task? */ + if( pxTaskCode == prvIdleTask ) + { + pxNewTCB->xTaskAttribute = taskATTRIBUTE_IS_IDLE; + } + else + { + pxNewTCB->xTaskAttribute = 0; + } + #endif + if( pxCreatedTask != NULL ) { /* Pass the handle out in an anonymous way. The handle can be used to From c348779c226d27c3908ea9f475e1d84f096bff07 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 1 Jun 2022 15:15:54 +0800 Subject: [PATCH 004/164] Add pxCurrentTCBs for multiple cores * Keep pxCurrentTCB for single core * Add pxCurrentTCBs for SMP * Add xTaskGetCurrentTaskHandle for SMP * Replace taskSELECT_HIGHEST_PRIORITY_TASK with temporary prvSelectHighestPriorityTask --- tasks.c | 130 +++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 100 insertions(+), 30 deletions(-) diff --git a/tasks.c b/tasks.c index 0c4fd5caf23..cede9d1b871 100644 --- a/tasks.c +++ b/tasks.c @@ -137,22 +137,26 @@ /*-----------------------------------------------------------*/ - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ - UBaseType_t uxTopPriority = uxTopReadyPriority; \ - \ - /* Find the highest priority queue that contains ready tasks. */ \ - while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ - { \ - configASSERT( uxTopPriority ); \ - --uxTopPriority; \ - } \ - \ - /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ - * the same priority get an equal share of the processor time. */ \ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - uxTopReadyPriority = uxTopPriority; \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + #if ( configNUM_CORES == 1 ) + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority = uxTopReadyPriority; \ + \ + /* Find the highest priority queue that contains ready tasks. 
*/ \ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ + { \ + configASSERT( uxTopPriority ); \ + --uxTopPriority; \ + } \ + \ + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ + * the same priority get an equal share of the processor time. */ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + #else + #define taskSELECT_HIGHEST_PRIORITY_TASK prvSelectHighestPriorityTask + #endif /*-----------------------------------------------------------*/ @@ -164,6 +168,10 @@ #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + #if ( configNUM_CORES > 1 ) + #error configUSE_PORT_OPTIMISED_TASK_SELECTION not yet supported in SMP + #endif + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is * performed in a way that is tailored to the particular microcontroller * architecture being used. */ @@ -359,7 +367,12 @@ typedef tskTCB TCB_t; /*lint -save -e956 A manual analysis and inspection has been used to determine * which static variables must be declared volatile. */ -PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; +#if ( configNUM_CORES == 1 ) + PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; +#else + PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; + #define pxCurrentTCB xTaskGetCurrentTaskHandle() +#endif /* Lists for ready and blocked tasks. -------------------- * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but @@ -433,6 +446,11 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* File private functions. --------------------------------*/ +/* + * Selects the highest priority available task + */ +static BaseType_t prvSelectHighestPriorityTask( void ); + /** * Utility task that simply returns pdTRUE if the task referenced by xTask is * currently in the Suspended state, or pdFALSE if the task referenced by xTask @@ -594,6 +612,29 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ +/* SMP_TODO : This is a temporay implementation for compilation. + * Update this function in another commit. */ +#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) && ( configNUM_CORES > 1 ) + static BaseType_t prvSelectHighestPriorityTask( void ) + { + UBaseType_t uxTopPriority = uxTopReadyPriority; + + /* Find the highest priority queue that contains ready tasks. */ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) + { + configASSERT( uxTopPriority ); + --uxTopPriority; + } + + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of + * the same priority get an equal share of the processor time. */ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCBs[ portGET_CORE_ID() ], &( pxReadyTasksLists[ uxTopPriority ] ) ); + uxTopReadyPriority = uxTopPriority; + } +#endif + +/*-----------------------------------------------------------*/ + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, @@ -1087,7 +1128,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { /* There are no other tasks, or all the other tasks are in * the suspended state - make this the current task. */ - pxCurrentTCB = pxNewTCB; + /* SMP_TODO : fix this in other PR. 
*/ + #if ( configNUM_CORES == 1 ) + pxCurrentTCB = pxNewTCB; + #else + pxCurrentTCBs[ portGET_CORE_ID() ] = pxNewTCB; + #endif if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) { @@ -1110,7 +1156,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) { - pxCurrentTCB = pxNewTCB; + /* SMP_TODO : fix this in other PR. */ + #if ( configNUM_CORES == 1 ) + pxCurrentTCB = pxNewTCB; + #else + pxCurrentTCBs[ portGET_CORE_ID() ] = pxNewTCB; + #endif } else { @@ -1809,7 +1860,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * NULL so when the next task is created pxCurrentTCB will * be set to point to it no matter what its relative priority * is. */ - pxCurrentTCB = NULL; + /* SMP_TODO : fix this in other PR. */ + #if ( configNUM_CORES == 1 ) + pxCurrentTCB = NULL; + #else + pxCurrentTCBs[ portGET_CORE_ID() ] = NULL; + #endif } else { @@ -4079,19 +4135,33 @@ static void prvResetNextTaskUnblockTime( void ) } /*-----------------------------------------------------------*/ -#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUM_CORES > 1 ) - TaskHandle_t xTaskGetCurrentTaskHandle( void ) - { - TaskHandle_t xReturn; + #if ( configNUM_CORES == 1 ) + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + { + TaskHandle_t xReturn; - /* A critical section is not required as this is not called from - * an interrupt and the current TCB will always be the same for any - * individual execution thread. */ - xReturn = pxCurrentTCB; + /* A critical section is not required as this is not called from + * an interrupt and the current TCB will always be the same for any + * individual execution thread. */ + xReturn = pxCurrentTCB; - return xReturn; - } + return xReturn; + } + #else + /* SMP_TODO : Fix the interrupt macro in another commit. */ + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + { + TaskHandle_t xReturn; + + portDISABLE_INTERRUPTS(); + xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; + portENABLE_INTERRUPTS(); + + return xReturn; + } + #endif #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ /*-----------------------------------------------------------*/ From 70bdf23fd83bc96452f918c3a3fea68575cc60b6 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 13 Jun 2022 21:00:09 +0800 Subject: [PATCH 005/164] Add SMP critical section functions * Update vTaskEnterCritical and vTaskExitCritical functions for SMP * Add vTaskEnterCriticalFromISR and vTaskExitCriticalFromISR for SMP --- include/FreeRTOS.h | 28 ++++++- tasks.c | 205 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 227 insertions(+), 6 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 2287c7a8ac7..3e6d7f5456b 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -299,7 +299,7 @@ #ifndef portGET_CORE_ID - #if configNUM_CORES == 1 + #if ( configNUM_CORES == 1 ) #define portGET_CORE_ID() 0 #else #error configNUM_CORES is set to more than 1 then portGET_CORE_ID must also be defined. @@ -307,6 +307,32 @@ #endif /* portGET_CORE_ID */ +#ifndef portYIELD_CORE + + #if ( configNUM_CORES == 1 ) + #define portYIELD_CORE( x ) portYIELD() + #else + #error configNUM_CORES is set to more than 1 then portYIELD_CORE must also be defined. 
+ #endif /* configNUM_CORES */ + +#endif /* portYIELD_CORE */ + +#ifndef portSET_INTERRUPT_MASK + + #if ( configNUM_CORES == 1 ) + #error portSET_INTERRUPT_MASK is required in SMP + #endif + +#endif /* portSET_INTERRUPT_MASK */ + +#ifndef portCLEAR_INTERRUPT_MASK + + #if ( configNUM_CORES > 1 ) + #error portCLEAR_INTERRUPT_MASK is required in SMP + #endif + +#endif /* portCLEAR_INTERRUPT_MASK */ + /* The timers module relies on xTaskGetSchedulerState(). */ #if configUSE_TIMERS == 1 diff --git a/tasks.c b/tasks.c index cede9d1b871..f642d48c3ca 100644 --- a/tasks.c +++ b/tasks.c @@ -446,6 +446,12 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* File private functions. --------------------------------*/ +/* + * Checks to see if another task moved the current task out of the ready + * list while it was waiting to enter a critical section and yields if so. + */ +static void prvCheckForRunStateChange( void ); + /* * Selects the highest priority available task */ @@ -612,6 +618,84 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ +#if ( configNUM_CORES > 1 ) + static void prvCheckForRunStateChange( void ) + { + UBaseType_t uxPrevCriticalNesting; + UBaseType_t uxPrevSchedulerSuspended; + TCB_t * pxThisTCB; + + /* This function should not be called in ISR. If the task on the current + * core is no longer running, then vTaskSwitchContext() probably should + * be run before returning, but we don't have a way to force that to happen + * from here. */ + configASSERT( portCHECK_IF_IN_ISR() ); + + /* This function is always called with interrupts disabled + * so this is safe. */ + pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ]; + + while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) + { + /* We are only here if we just entered a critical section + * or if we just suspended the scheduler, and another task + * has requested that we yield. + * + * This is slightly complicated since we need to save and restore + * the suspension and critical nesting counts, as well as release + * and reacquire the correct locks. And then do it all over again + * if our state changed again during the reacquisition. */ + + uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; + uxPrevSchedulerSuspended = uxSchedulerSuspended; + + /* this must only be called the first time we enter into a critical + * section, otherwise it could context switch in the middle of a + * critical section. */ + configASSERT( uxPrevCriticalNesting + uxPrevSchedulerSuspended == 1U ); + + uxSchedulerSuspended = 0U; + + if( uxPrevCriticalNesting > 0U ) + { + pxThisTCB->uxCriticalNesting = 0U; + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + } + else + { + /* uxPrevSchedulerSuspended must be 1 */ + portRELEASE_TASK_LOCK(); + } + + portMEMORY_BARRIER(); + configASSERT( pxThisTCB->xTaskRunState == taskTASK_YIELDING ); + + portENABLE_INTERRUPTS(); + + /* Enabling interrupts should cause this core to immediately + * service the pending interrupt and yield. If the run state is still + * yielding here then that is a problem. 
*/ + configASSERT( pxThisTCB->xTaskRunState != taskTASK_YIELDING ); + + portDISABLE_INTERRUPTS(); + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + pxCurrentTCB->uxCriticalNesting = uxPrevCriticalNesting; + uxSchedulerSuspended = uxPrevSchedulerSuspended; + + if( uxPrevCriticalNesting == 0U ) + { + /* uxPrevSchedulerSuspended must be 1 */ + configASSERT( uxPrevSchedulerSuspended != ( UBaseType_t ) pdFALSE ); + portRELEASE_ISR_LOCK(); + } + } + } +#endif + +/*-----------------------------------------------------------*/ + /* SMP_TODO : This is a temporay implementation for compilation. * Update this function in another commit. */ #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) && ( configNUM_CORES > 1 ) @@ -4500,6 +4584,14 @@ static void prvResetNextTaskUnblockTime( void ) if( xSchedulerRunning != pdFALSE ) { + #if ( configNUM_CORES > 1 ) + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + } + #endif + ( pxCurrentTCB->uxCriticalNesting )++; /* This is not the interrupt safe version of the enter critical @@ -4511,12 +4603,47 @@ static void prvResetNextTaskUnblockTime( void ) if( pxCurrentTCB->uxCriticalNesting == 1 ) { portASSERT_IF_IN_ISR(); + #if ( configNUM_CORES > 1 ) + /* The only time there would be a problem is if this is called + * before a context switch and vTaskExitCritical() is called + * after pxCurrentTCB changes. Therefore this should not be + * used within vTaskSwitchContext(). */ + prvCheckForRunStateChange(); + #endif + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ + +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) + + UBaseType_t vTaskEnterCriticalFromISR( void ) + { + UBaseType_t uxSavedInterruptStatus = 0; + + if( xSchedulerRunning != pdFALSE ) + { + uxSavedInterruptStatus = portSET_INTERRUPT_MASK(); + + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portGET_ISR_LOCK(); } + + ( pxCurrentTCB->uxCriticalNesting )++; } else { mtCOVERAGE_TEST_MARKER(); } + return uxSavedInterruptStatus; } #endif /* portCRITICAL_NESTING_IN_TCB */ @@ -4526,19 +4653,87 @@ static void prvResetNextTaskUnblockTime( void ) void vTaskExitCritical( void ) { + BaseType_t xYieldCurrentTask; + if( xSchedulerRunning != pdFALSE ) { - if( pxCurrentTCB->uxCriticalNesting > 0U ) + /* If pxCurrentTCB->uxCriticalNesting is zero then this function + * does not match a previous call to vTaskEnterCritical(). */ + configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + + if( pxCurrentTCB->uxCriticalNesting > 1U ) + { + ( pxCurrentTCB->uxCriticalNesting )--; + } + else if( pxCurrentTCB->uxCriticalNesting == 1U ) { ( pxCurrentTCB->uxCriticalNesting )--; - if( pxCurrentTCB->uxCriticalNesting == 0U ) + /* Get the xYieldPending stats inside the critical section. */ + xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; + + #if ( configNUM_CORES > 1 ) + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + #endif + portENABLE_INTERRUPTS(); + + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. 
*/ + if( xYieldCurrentTask != pdFALSE ) { - portENABLE_INTERRUPTS(); + portYIELD(); } - else + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) + + void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) + { + BaseType_t xYieldCurrentTask; + + if( xSchedulerRunning != pdFALSE ) + { + /* If pxCurrentTCB->uxCriticalNesting is zero then this function + * does not match a previous call to vTaskEnterCritical(). */ + configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + + if( pxCurrentTCB->uxCriticalNesting > 1U ) + { + ( pxCurrentTCB->uxCriticalNesting )--; + } + else if( pxCurrentTCB->uxCriticalNesting == 1U ) + { + ( pxCurrentTCB->uxCriticalNesting )--; + + /* Get the xYieldPending stats inside the critical section. */ + xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; + + portRELEASE_ISR_LOCK(); + portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); + + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. */ + if( xYieldCurrentTask != pdFALSE ) { - mtCOVERAGE_TEST_MARKER(); + portYIELD(); } } else From cb5ab6d14e0d8cea686702f88d1d9003e3bee9a2 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 14 Jun 2022 14:34:13 +0800 Subject: [PATCH 006/164] Add SMP prvYieldCore and prvYieldForTask --- include/FreeRTOS.h | 2 +- include/task.h | 2 + tasks.c | 105 ++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 107 insertions(+), 2 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 3e6d7f5456b..887c66cc105 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -319,7 +319,7 @@ #ifndef portSET_INTERRUPT_MASK - #if ( configNUM_CORES == 1 ) + #if ( configNUM_CORES > 1 ) #error portSET_INTERRUPT_MASK is required in SMP #endif diff --git a/include/task.h b/include/task.h index a07359e1935..c3acac51244 100644 --- a/include/task.h +++ b/include/task.h @@ -252,6 +252,8 @@ typedef enum #define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) #define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) +/* Check if core value is valid */ +#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ) ) /*----------------------------------------------------------- * TASK CREATION API diff --git a/tasks.c b/tasks.c index f642d48c3ca..bc6b1896cfb 100644 --- a/tasks.c +++ b/tasks.c @@ -269,7 +269,7 @@ #define taskTASK_IS_RUNNING( xTaskRunState ) ( ( 0 <= xTaskRunState ) && ( xTaskRunState < configNUM_CORES ) ) /* Indicates that the task is an Idle task. */ -#define taskATTRIBUTE_IS_IDLE ( 1UL << 0 ) +#define taskATTRIBUTE_IS_IDLE ( BaseType_t ) ( 1UL << 0 ) typedef BaseType_t TaskRunning_t; @@ -452,6 +452,18 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t */ static void prvCheckForRunStateChange( void ); +/* + * Yields the given core. + */ +static void prvYieldCore( BaseType_t xCoreID ); + +/* + * Yields a core, or cores if multiple priorities are not allowed to run + * simultaneously, to allow the task pxTCB to run. 
+ */ +static void prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority ); + /* * Selects the highest priority available task */ @@ -694,6 +706,97 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } #endif +/*-----------------------------------------------------------*/ +#if ( configNUM_CORES > 1 ) + static void prvYieldCore( BaseType_t xCoreID ) + { + /* This must be called from a critical section and + * xCoreID must be valid. */ + configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + configASSERT( taskVALID_CORE_ID( xCoreID ) ); + + if( portCHECK_IF_IN_ISR() && ( xCoreID == portGET_CORE_ID() ) ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + if( pxCurrentTCBs[ xCoreID ]->xTaskRunState != taskTASK_YIELDING ) + { + if( xCoreID == portGET_CORE_ID() ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + portYIELD_CORE( xCoreID ); + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_YIELDING; + } + } + } + } +#endif + +/*-----------------------------------------------------------*/ +#if ( configNUM_CORES > 1 ) + static void prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority ) + { + BaseType_t xLowestPriority; + BaseType_t xTaskPriority; + BaseType_t xLowestPriorityCore = -1; + BaseType_t xCoreID; + TaskRunning_t xTaskRunState; + + /* This must be called from a critical section. */ + configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + + xLowestPriority = ( BaseType_t ) pxTCB->uxPriority; + + if( xPreemptEqualPriority == pdFALSE ) + { + /* xLowestPriority will be decremented to -1 if the priority of pxTCB + * is 0. This is ok as we will give system idle tasks a priority of -1 below. */ + --xLowestPriority; + } + + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) + { + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority; + + /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. */ + if( pxCurrentTCBs[ xCoreID ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + { + xTaskPriority = xTaskPriority - 1; + } + + xTaskRunState = pxCurrentTCBs[ xCoreID ]->xTaskRunState; + + if( ( taskTASK_IS_RUNNING( xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) ) + { + if( xTaskPriority <= xLowestPriority ) + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = xCoreID; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( taskVALID_CORE_ID( xLowestPriorityCore ) ) + { + prvYieldCore( xLowestPriorityCore ); + } + } +#endif + /*-----------------------------------------------------------*/ /* SMP_TODO : This is a temporay implementation for compilation. 
From 42f96a19a9cf64e4f16d5cd4c07fcf4247cf0d6d Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 14 Jun 2022 16:00:34 +0800 Subject: [PATCH 007/164] Add idle tasks for SMP * Add minimal idle task function declaration * Align to use 0x00 for null terminator --- tasks.c | 384 +++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 299 insertions(+), 85 deletions(-) diff --git a/tasks.c b/tasks.c index bc6b1896cfb..476f9e72ab2 100644 --- a/tasks.c +++ b/tasks.c @@ -414,7 +414,7 @@ PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUM_CORES ] = { PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUM_CORES ] = { NULL }; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -445,6 +445,10 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /*-----------------------------------------------------------*/ /* File private functions. --------------------------------*/ +/* + * Creates the idle tasks during scheduler start + */ +static BaseType_t prvCreateIdleTasks( void ); /* * Checks to see if another task moved the current task out of the ready @@ -498,6 +502,9 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * */ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +#if ( configNUM_CORES > 1 ) + static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +#endif /* * Utility to free all memory allocated by the scheduler to hold a TCB, @@ -2247,62 +2254,153 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ /*-----------------------------------------------------------*/ -void vTaskStartScheduler( void ) +static BaseType_t prvCreateIdleTasks( void ) { - BaseType_t xReturn; + BaseType_t xReturn = pdPASS; + BaseType_t xCoreID; + char cIdleName[ configMAX_TASK_NAME_LEN ]; - /* Add the idle task at the lowest priority. */ - #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + /* Add each idle task at the lowest priority. */ + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) { - StaticTask_t * pxIdleTaskTCBBuffer = NULL; - StackType_t * pxIdleTaskStackBuffer = NULL; - uint32_t ulIdleTaskStackSize; - - /* The Idle task is created using user provided RAM - obtain the - * address of the RAM then create the idle task. */ - vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); - xIdleTaskHandle = xTaskCreateStatic( prvIdleTask, - configIDLE_TASK_NAME, - ulIdleTaskStackSize, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. 
*/ - pxIdleTaskStackBuffer, - pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - - if( xIdleTaskHandle != NULL ) + BaseType_t x; + + if( xReturn == pdFAIL ) { - xReturn = pdPASS; + break; } else { - xReturn = pdFAIL; + mtCOVERAGE_TEST_MARKER(); } - } - #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ - { - /* The Idle task is being created using dynamically allocated RAM. */ - xReturn = xTaskCreate( prvIdleTask, - configIDLE_TASK_NAME, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - #endif /* configSUPPORT_STATIC_ALLOCATION */ - #if ( configUSE_TIMERS == 1 ) - { - if( xReturn == pdPASS ) + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configMAX_TASK_NAME_LEN; x++ ) { - xReturn = xTimerCreateTimerTask(); + cIdleName[ x ] = configIDLE_TASK_NAME[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + * configMAX_TASK_NAME_LEN characters just in case the memory after the + * string is not accessible (extremely unlikely). */ + if( cIdleName[ x ] == ( char ) 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Append the idle task number to the end of the name if there is space */ + if( x < configMAX_TASK_NAME_LEN ) + { + cIdleName[ x++ ] = ( char ) xCoreID + '0'; + + /* And append a null character if there is space */ + if( x < configMAX_TASK_NAME_LEN ) + { + cIdleName[ x ] = '\0'; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { mtCOVERAGE_TEST_MARKER(); } + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + if( xCoreID == 0 ) + { + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvIdleTask, + cIdleName, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + + #if ( configNUM_CORES > 1 ) + else + { + static StaticTask_t xIdleTCBBuffers[ configNUM_CORES - 1 ]; + static StackType_t xIdleTaskStackBuffers[ configNUM_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; + + xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + xIdleTaskStackBuffers[ xCoreID - 1 ], + &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. 
*/ + } + #endif /* if ( configNUM_CORES > 1 ) */ + + if( xIdleTaskHandles[ xCoreID ] != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ + { + if( xCoreID == 0 ) + { + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + + #if ( configNUM_CORES > 1 ) + else + { + xReturn = xTaskCreate( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ } + + return xReturn; +} + +/*-----------------------------------------------------------*/ + +void vTaskStartScheduler( void ) +{ + BaseType_t xReturn; + + #if ( configUSE_TIMERS == 1 ) + { + xReturn = xTimerCreateTimerTask(); + } #endif /* configUSE_TIMERS */ + xReturn = prvCreateIdleTasks(); + if( xReturn == pdPASS ) { /* freertos_tasks_c_additions_init() should only be called if the user @@ -2365,8 +2463,8 @@ void vTaskStartScheduler( void ) } /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0, - * meaning xIdleTaskHandle is not used anywhere else. */ - ( void ) xIdleTaskHandle; + * meaning xIdleTaskHandles are not used anywhere else. */ + ( void ) xIdleTaskHandles; /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority * from getting optimized out as it is no longer used by the kernel. */ @@ -2859,13 +2957,14 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char /*----------------------------------------------------------*/ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) - + /* SMP_TODO : This function returns only idle task handle for core 0. + * Consider to add another function to return the idle task handles. */ TaskHandle_t xTaskGetIdleTaskHandle( void ) { /* If xTaskGetIdleTaskHandle() is called before the scheduler has been - * started, then xIdleTaskHandle will be NULL. */ - configASSERT( ( xIdleTaskHandle != NULL ) ); - return xIdleTaskHandle; + * started, then xIdleTaskHandles will be NULL. */ + configASSERT( ( xIdleTaskHandles[ 0 ] != NULL ) ); + return xIdleTaskHandles[ 0 ]; } #endif /* INCLUDE_xTaskGetIdleTaskHandle */ @@ -3717,6 +3816,76 @@ void vTaskMissedYield( void ) #endif /* configUSE_TRACE_FACILITY */ +/* + * ----------------------------------------------------------- + * The MinimalIdle task. + * ---------------------------------------------------------- + * + * The minimal idle task is used for all the additional Cores in a SMP system. + * There must be only 1 idle task and the rest are minimal idle tasks. + * + * @todo additional conditional compiles to remove this function. 
+ */ + +#if ( configNUM_CORES > 1 ) + static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) + { + taskYIELD(); + + for( ; ; ) + { + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + * see if any other task has become available. If we are using + * preemption we don't need to do this as any task becoming available + * will automatically get the processor anyway. */ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + * timesliced. If a task that is sharing the idle priority is ready + * to run then the idle task should yield before the end of the + * timeslice. + * + * A critical region is not required here as we are just reading from + * the list, and an occasional incorrect value will not matter. If + * the ready list at the idle priority contains one more task than the + * number of idle tasks, which is equal to the configured numbers of cores + * then a task other than the idle task is ready to execute. */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + + #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) + { + extern void vApplicationMinimalIdleHook( void ); + + /* Call the user defined function from within the idle task. This + * allows the application designer to add background functionality + * without the overhead of a separate task. + * + * This hook is intended to manage core activity such as disabling cores that go idle. + * + * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + * CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationMinimalIdleHook(); + } + #endif /* configUSE_MINIMAL_IDLE_HOOK */ + } + } +#endif /* if ( configNUM_CORES > 1 ) */ + /* * ----------------------------------------------------------- * The Idle task. @@ -3741,6 +3910,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) * any. */ portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ); + /* All cores start up in the idle task. This initial yield gets the application + * tasks started. */ + taskYIELD(); + for( ; ; ) { /* See if any tasks have deleted themselves - if so then the idle task @@ -3766,9 +3939,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) * * A critical region is not required here as we are just reading from * the list, and an occasional incorrect value will not matter. If - * the ready list at the idle priority contains more than one task + * the ready list at the idle priority contains one more task than the + * number of idle tasks, which is equal to the configured numbers of cores * then a task other than the idle task is ready to execute. */ - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) { taskYIELD(); } @@ -3841,6 +4015,22 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) } } #endif /* configUSE_TICKLESS_IDLE */ + + #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) + { + extern void vApplicationMinimalIdleHook( void ); + + /* Call the user defined function from within the idle task. 
This + * allows the application designer to add background functionality + * without the overhead of a separate task. + * + * This hook is intended to manage core activity such as disabling cores that go idle. + * + * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + * CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationMinimalIdleHook(); + } + #endif /* configUSE_MINIMAL_IDLE_HOOK */ } } /*-----------------------------------------------------------*/ @@ -4764,30 +4954,39 @@ static void prvResetNextTaskUnblockTime( void ) * does not match a previous call to vTaskEnterCritical(). */ configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); - if( pxCurrentTCB->uxCriticalNesting > 1U ) - { - ( pxCurrentTCB->uxCriticalNesting )--; - } - else if( pxCurrentTCB->uxCriticalNesting == 1U ) + /* This function should not be called in ISR. Use vTaskExitCriticalFromISR + * to exit critical section from ISR. */ + portASSERT_IF_IN_ISR(); + + if( pxCurrentTCB->uxCriticalNesting > 0U ) { ( pxCurrentTCB->uxCriticalNesting )--; - /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; - - #if ( configNUM_CORES > 1 ) - portRELEASE_ISR_LOCK(); - portRELEASE_TASK_LOCK(); - #endif - portENABLE_INTERRUPTS(); - - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. */ - if( xYieldCurrentTask != pdFALSE ) + if( pxCurrentTCB->uxCriticalNesting == 0U ) { - portYIELD(); + #if ( configNUM_CORES > 1 ) + /* Get the xYieldPending stats inside the critical section. */ + xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; + + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + portENABLE_INTERRUPTS(); + + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. */ + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + #else + portENABLE_INTERRUPTS(); + #endif /* ( configNUM_CORES > 1 ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); } } else @@ -4816,27 +5015,30 @@ static void prvResetNextTaskUnblockTime( void ) * does not match a previous call to vTaskEnterCritical(). */ configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); - if( pxCurrentTCB->uxCriticalNesting > 1U ) - { - ( pxCurrentTCB->uxCriticalNesting )--; - } - else if( pxCurrentTCB->uxCriticalNesting == 1U ) + if( pxCurrentTCB->uxCriticalNesting > 0U ) { ( pxCurrentTCB->uxCriticalNesting )--; - /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + /* Get the xYieldPending stats inside the critical section. */ + xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; - portRELEASE_ISR_LOCK(); - portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); + portRELEASE_ISR_LOCK(); + portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. */ - if( xYieldCurrentTask != pdFALSE ) + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. 
*/ + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + } + else { - portYIELD(); + mtCOVERAGE_TEST_MARKER(); } } else @@ -5723,7 +5925,14 @@ TickType_t uxTaskResetEventItemValue( void ) configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void ) { - return xIdleTaskHandle->ulRunTimeCounter; + configRUN_TIME_COUNTER_TYPE ulReturn = 0; + + for( BaseType_t i = 0; i < configNUM_CORES; i++ ) + { + ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter; + } + + return ulReturn; } #endif @@ -5734,8 +5943,9 @@ TickType_t uxTaskResetEventItemValue( void ) configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ) { configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; + configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0; - ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUM_CORES; /* For percentage calculations. */ ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; @@ -5743,7 +5953,11 @@ TickType_t uxTaskResetEventItemValue( void ) /* Avoid divide by zero errors. */ if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 ) { - ulReturn = xIdleTaskHandle->ulRunTimeCounter / ulTotalTime; + for( BaseType_t i = 0; i < configNUM_CORES; i++ ) + { + ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter; + } + ulReturn = ulRunTimeCounter / ulTotalTime; } else { From a0d7477e010b2f0cef5f48ae22ecedcf77a298ce Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 20 Jun 2022 12:31:40 +0800 Subject: [PATCH 008/164] Merge vTaskSuspendAll and xTaskResumeAll from SMP branch --- include/FreeRTOS.h | 10 +++ tasks.c | 147 ++++++++++++++++++++++++++++++--------------- 2 files changed, 109 insertions(+), 48 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 887c66cc105..17639bd8058 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -333,6 +333,16 @@ #endif /* portCLEAR_INTERRUPT_MASK */ +#ifndef portRELEASE_TASK_LOCK + + #if ( configNUM_CORES == 1 ) + #define portRELEASE_TASK_LOCK() + #else + #error portRELEASE_TASK_LOCK is required in SMP + #endif + +#endif /* portRELEASE_TASK_LOCK */ + /* The timers module relies on xTaskGetSchedulerState(). */ #if configUSE_TIMERS == 1 diff --git a/tasks.c b/tasks.c index 476f9e72ab2..9734ec72ed0 100644 --- a/tasks.c +++ b/tasks.c @@ -2292,24 +2292,26 @@ static BaseType_t prvCreateIdleTasks( void ) } /* Append the idle task number to the end of the name if there is space */ - if( x < configMAX_TASK_NAME_LEN ) - { - cIdleName[ x++ ] = ( char ) xCoreID + '0'; - - /* And append a null character if there is space */ + #if ( configNUM_CORES > 1 ) if( x < configMAX_TASK_NAME_LEN ) { - cIdleName[ x ] = '\0'; + cIdleName[ x++ ] = ( char ) xCoreID + '0'; + + /* And append a null character if there is space */ + if( x < configMAX_TASK_NAME_LEN ) + { + cIdleName[ x ] = '\0'; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { mtCOVERAGE_TEST_MARKER(); } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #endif /* ( configNUM_CORES > 1 ) */ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) { @@ -2485,22 +2487,66 @@ void vTaskEndScheduler( void ) void vTaskSuspendAll( void ) { - /* A critical section is not required as the variable is of type - * BaseType_t. Please read Richard Barry's reply in the following link to a - * post in the FreeRTOS support forum before reporting this as a bug! - - * https://goo.gl/wu4acr */ - - /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that - * do not otherwise exhibit real time behaviour. 
*/ - portSOFTWARE_BARRIER(); - - /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment - * is used to allow calls to vTaskSuspendAll() to nest. */ - ++uxSchedulerSuspended; - - /* Enforces ordering for ports and optimised compilers that may otherwise place - * the above increment elsewhere. */ - portMEMORY_BARRIER(); + #if ( configNUM_CORES == 1 ) + { + /* A critical section is not required as the variable is of type + * BaseType_t. Please read Richard Barry's reply in the following link to a + * post in the FreeRTOS support forum before reporting this as a bug! - + * https://goo.gl/wu4acr */ + + /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that + * do not otherwise exhibit real time behaviour. */ + portSOFTWARE_BARRIER(); + + /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment + * is used to allow calls to vTaskSuspendAll() to nest. */ + ++uxSchedulerSuspended; + + /* Enforces ordering for ports and optimised compilers that may otherwise place + * the above increment elsewhere. */ + portMEMORY_BARRIER(); + } + #else + { + UBaseType_t ulState; + + /* This must only be called from within a task */ + portASSERT_IF_IN_ISR(); + + if( xSchedulerRunning != pdFALSE ) + { + /* writes to uxSchedulerSuspended must be protected by both the task AND ISR locks. + * We must disable interrupts before we grab the locks in the event that this task is + * interrupted and switches context before incrementing uxSchedulerSuspended. + * It is safe to re-enable interrupts after releasing the ISR lock and incrementing + * uxSchedulerSuspended since that will prevent context switches. */ + ulState = portSET_INTERRUPT_MASK(); + + /* portSOFRWARE_BARRIER() is only implemented for emulated/simulated ports that + * do not otherwise exhibit real time behaviour. */ + portSOFTWARE_BARRIER(); + + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + + /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment + * is used to allow calls to vTaskSuspendAll() to nest. */ + ++uxSchedulerSuspended; + portRELEASE_ISR_LOCK(); + + if( ( uxSchedulerSuspended == 1U ) && ( pxCurrentTCB->uxCriticalNesting == 0U ) ) + { + prvCheckForRunStateChange(); + } + + portCLEAR_INTERRUPT_MASK( ulState ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( configNUM_CORES == 1 ) */ } /*----------------------------------------------------------*/ @@ -2572,10 +2618,6 @@ BaseType_t xTaskResumeAll( void ) TCB_t * pxTCB = NULL; BaseType_t xAlreadyYielded = pdFALSE; - /* If uxSchedulerSuspended is zero then this function does not match a - * previous call to vTaskSuspendAll(). */ - configASSERT( uxSchedulerSuspended ); - /* It is possible that an ISR caused a task to be removed from an event * list while the scheduler was suspended. If this was the case then the * removed task will have been added to the xPendingReadyList. Once the @@ -2583,7 +2625,16 @@ BaseType_t xTaskResumeAll( void ) * tasks from this list into their appropriate ready list. */ taskENTER_CRITICAL(); { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + /* If uxSchedulerSuspended is zero then this function does not match a + * previous call to vTaskSuspendAll(). 
*/ + configASSERT( uxSchedulerSuspended ); + --uxSchedulerSuspended; + portRELEASE_TASK_LOCK(); if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { @@ -2599,17 +2650,9 @@ BaseType_t xTaskResumeAll( void ) listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); - /* If the moved task has a priority higher than or equal to - * the current task then a yield must be performed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) - { - /* SMP_TODO : Fix this when reviewing other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. + * If the current core yielded then vTaskSwitchContext() has already been called + * which sets xYieldPendings for the current core to pdTRUE. */ } if( pxTCB != NULL ) @@ -2626,7 +2669,12 @@ BaseType_t xTaskResumeAll( void ) /* If any ticks occurred while the scheduler was suspended then * they should be processed now. This ensures the tick count does * not slip, and that any delayed tasks are resumed at the correct - * time. */ + * time. + * + * It should be safe to call xTaskIncrementTick here from any core + * since we are in a critical section and xTaskIncrementTick itself + * protects itself within a critical section. Suspending the scheduler + * from any core causes xTaskIncrementTick to increment uxPendedCounts. */ { TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */ @@ -2636,8 +2684,9 @@ BaseType_t xTaskResumeAll( void ) { if( xTaskIncrementTick() != pdFALSE ) { - /* SMP_TODO : Fix this when reviewing other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + /* other cores are interrupted from + * within xTaskIncrementTick(). */ + xYieldPendings[ xCoreID ] = pdTRUE; } else { @@ -2655,15 +2704,17 @@ BaseType_t xTaskResumeAll( void ) } } - /* SMP_TODO : Fix this when reviewing other commit. */ - if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + if( xYieldPendings[ xCoreID ] != pdFALSE ) { #if ( configUSE_PREEMPTION != 0 ) { xAlreadyYielded = pdTRUE; } #endif - taskYIELD_IF_USING_PREEMPTION(); + + #if ( configNUM_CORES == 1 ) + taskYIELD_IF_USING_PREEMPTION(); + #endif /* ( configNUM_CORES == 1 ) */ } else { From a2c83bfe3a812b67c5bdce52c9df37e69599abcd Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 23 Jun 2022 12:56:01 +0800 Subject: [PATCH 009/164] Merge vTaskResume and xTaskResumeFromISR from SMP --- tasks.c | 193 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 109 insertions(+), 84 deletions(-) diff --git a/tasks.c b/tasks.c index 9734ec72ed0..9bd48d51d4a 100644 --- a/tasks.c +++ b/tasks.c @@ -463,10 +463,12 @@ static void prvYieldCore( BaseType_t xCoreID ); /* * Yields a core, or cores if multiple priorities are not allowed to run - * simultaneously, to allow the task pxTCB to run. + * simultaneously, to allow the task pxTCB to run. Negative value is returned if + * yeilding for task is not required. Otherwise, core ID is returned. 
*/ -static void prvYieldForTask( TCB_t * pxTCB, - const BaseType_t xPreemptEqualPriority ); +static BaseType_t prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority, + BaseType_t xYieldForTask ); /* * Selects the highest priority available task @@ -745,13 +747,34 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) - static void prvYieldForTask( TCB_t * pxTCB, - const BaseType_t xPreemptEqualPriority ) +#if ( configNUM_CORES == 1 ) + static BaseType_t prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority, + BaseType_t xYieldForTask ) + { + BaseType_t xLowestPriorityCore = ( ( BaseType_t ) -1 ); /* Negative value to indicate no yielding required. */ + + ( void ) xPreemptEqualPriority; + + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xLowestPriorityCore = ( ( BaseType_t ) 0 ); + if( xYieldForTask == pdTRUE ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + } + + return xLowestPriorityCore; + } +#else + static BaseType_t prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority, + BaseType_t xYieldForTask ) { BaseType_t xLowestPriority; BaseType_t xTaskPriority; - BaseType_t xLowestPriorityCore = -1; + BaseType_t xLowestPriorityCore = ( ( BaseType_t ) -1 ); /* Negative value to indicate no yielding required. */ BaseType_t xCoreID; TaskRunning_t xTaskRunState; @@ -799,8 +822,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( taskVALID_CORE_ID( xLowestPriorityCore ) ) { - prvYieldCore( xLowestPriorityCore ); + if( xYieldForTask == pdTRUE ) + { + prvYieldCore( xLowestPriorityCore ); + } } + + return xLowestPriorityCore; } #endif @@ -2132,8 +2160,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( xTaskToResume ); /* The parameter cannot be NULL as it is impossible to resume the - * currently executing task. */ - if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + * currently executing task. It is also impossible to resume a task + * that is actively running on another core but it is too dangerous + * to check their run state here. Safer to get into a critical section + * and check if it is actually suspended or not below. */ + if( pxTCB != NULL ) { taskENTER_CRITICAL(); { @@ -2146,18 +2177,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); - /* A higher priority task may have just been resumed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) - { - /* This yield may not cause the task just resumed to run, - * but will leave the lists in the correct state for the - * next yield. */ - taskYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Check if Yield is required for this Task in prvYieldForTask. */ + #if ( configUSE_PREEMPTION == 1 ) + ( void ) prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); + #endif } else { @@ -2180,7 +2203,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) { - BaseType_t xYieldRequired = pdFALSE; + BaseType_t xYieldRequired; + BaseType_t xYieldCoreID; TCB_t * const pxTCB = xTaskToResume; UBaseType_t uxSavedInterruptStatus; @@ -2215,19 +2239,20 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { /* Ready lists can be accessed so move the task from the * suspended list to the ready list directly. 
*/ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) - { - xYieldRequired = pdTRUE; + /* Check if yield is required for this task in prvYieldForTask. */ + xYieldCoreID = prvYieldForTask( pxTCB, pdTRUE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreID ) ) + { /* Mark that a yield is pending in case the user is not * using the return value to initiate a context switch * from the ISR using portYIELD_FROM_ISR. */ - /* SMP_TODO : Fix this when reviewing other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + xYieldPendings[ xYieldCoreID ] = pdTRUE; + xYieldRequired = pdTRUE; } else { - mtCOVERAGE_TEST_MARKER(); + xYieldRequired = pdFALSE; } ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); @@ -2485,69 +2510,69 @@ void vTaskEndScheduler( void ) } /*----------------------------------------------------------*/ -void vTaskSuspendAll( void ) -{ - #if ( configNUM_CORES == 1 ) +#if ( configNUM_CORES == 1 ) + void vTaskSuspendAll( void ) + { + /* A critical section is not required as the variable is of type + * BaseType_t. Please read Richard Barry's reply in the following link to a + * post in the FreeRTOS support forum before reporting this as a bug! - + * https://goo.gl/wu4acr */ + + /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that + * do not otherwise exhibit real time behaviour. */ + portSOFTWARE_BARRIER(); + + /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment + * is used to allow calls to vTaskSuspendAll() to nest. */ + ++uxSchedulerSuspended; + + /* Enforces ordering for ports and optimised compilers that may otherwise place + * the above increment elsewhere. */ + portMEMORY_BARRIER(); + } +#else + void vTaskSuspendAll( void ) + { + UBaseType_t ulState; + + /* This must only be called from within a task */ + portASSERT_IF_IN_ISR(); + + if( xSchedulerRunning != pdFALSE ) { - /* A critical section is not required as the variable is of type - * BaseType_t. Please read Richard Barry's reply in the following link to a - * post in the FreeRTOS support forum before reporting this as a bug! - - * https://goo.gl/wu4acr */ + /* writes to uxSchedulerSuspended must be protected by both the task AND ISR locks. + * We must disable interrupts before we grab the locks in the event that this task is + * interrupted and switches context before incrementing uxSchedulerSuspended. + * It is safe to re-enable interrupts after releasing the ISR lock and incrementing + * uxSchedulerSuspended since that will prevent context switches. */ + ulState = portSET_INTERRUPT_MASK(); - /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that + /* portSOFRWARE_BARRIER() is only implemented for emulated/simulated ports that * do not otherwise exhibit real time behaviour. */ portSOFTWARE_BARRIER(); + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment * is used to allow calls to vTaskSuspendAll() to nest. */ ++uxSchedulerSuspended; + portRELEASE_ISR_LOCK(); - /* Enforces ordering for ports and optimised compilers that may otherwise place - * the above increment elsewhere. */ - portMEMORY_BARRIER(); - } - #else - { - UBaseType_t ulState; - - /* This must only be called from within a task */ - portASSERT_IF_IN_ISR(); - - if( xSchedulerRunning != pdFALSE ) - { - /* writes to uxSchedulerSuspended must be protected by both the task AND ISR locks. 
- * We must disable interrupts before we grab the locks in the event that this task is - * interrupted and switches context before incrementing uxSchedulerSuspended. - * It is safe to re-enable interrupts after releasing the ISR lock and incrementing - * uxSchedulerSuspended since that will prevent context switches. */ - ulState = portSET_INTERRUPT_MASK(); - - /* portSOFRWARE_BARRIER() is only implemented for emulated/simulated ports that - * do not otherwise exhibit real time behaviour. */ - portSOFTWARE_BARRIER(); - - portGET_TASK_LOCK(); - portGET_ISR_LOCK(); - - /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment - * is used to allow calls to vTaskSuspendAll() to nest. */ - ++uxSchedulerSuspended; - portRELEASE_ISR_LOCK(); - - if( ( uxSchedulerSuspended == 1U ) && ( pxCurrentTCB->uxCriticalNesting == 0U ) ) - { - prvCheckForRunStateChange(); - } - - portCLEAR_INTERRUPT_MASK( ulState ); - } - else + if( ( uxSchedulerSuspended == 1U ) && ( pxCurrentTCB->uxCriticalNesting == 0U ) ) { - mtCOVERAGE_TEST_MARKER(); + prvCheckForRunStateChange(); } + + portCLEAR_INTERRUPT_MASK( ulState ); } - #endif /* ( configNUM_CORES == 1 ) */ -} + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +#endif /* ( configNUM_CORES == 1 ) */ + /*----------------------------------------------------------*/ #if ( configUSE_TICKLESS_IDLE != 0 ) @@ -2618,6 +2643,10 @@ BaseType_t xTaskResumeAll( void ) TCB_t * pxTCB = NULL; BaseType_t xAlreadyYielded = pdFALSE; + /* If uxSchedulerSuspended is zero then this function does not match a + * previous call to vTaskSuspendAll(). */ + configASSERT( uxSchedulerSuspended ); + /* It is possible that an ISR caused a task to be removed from an event * list while the scheduler was suspended. If this was the case then the * removed task will have been added to the xPendingReadyList. Once the @@ -2629,10 +2658,6 @@ BaseType_t xTaskResumeAll( void ) xCoreID = portGET_CORE_ID(); - /* If uxSchedulerSuspended is zero then this function does not match a - * previous call to vTaskSuspendAll(). */ - configASSERT( uxSchedulerSuspended ); - --uxSchedulerSuspended; portRELEASE_TASK_LOCK(); From fb3439650482d8f65ac1773229732c48028587fd Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 28 Jun 2022 11:42:07 +0800 Subject: [PATCH 010/164] Merge xTaskIncrementTick from SMP --- tasks.c | 114 ++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 87 insertions(+), 27 deletions(-) diff --git a/tasks.c b/tasks.c index 9bd48d51d4a..ce2a2a2a613 100644 --- a/tasks.c +++ b/tasks.c @@ -754,17 +754,23 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; { BaseType_t xLowestPriorityCore = ( ( BaseType_t ) -1 ); /* Negative value to indicate no yielding required. 
*/ - ( void ) xPreemptEqualPriority; - - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { xLowestPriorityCore = ( ( BaseType_t ) 0 ); - if( xYieldForTask == pdTRUE ) + } + else + { + if( ( xPreemptEqualPriority == pdTRUE ) && ( pxTCB->uxPriority == pxCurrentTCB->uxPriority ) ) { - taskYIELD_IF_USING_PREEMPTION(); + xLowestPriorityCore = ( ( BaseType_t ) 0 ); } } + if( taskVALID_CORE_ID( xLowestPriorityCore ) && ( xYieldForTask == pdTRUE ) ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + return xLowestPriorityCore; } #else @@ -830,7 +836,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; return xLowestPriorityCore; } -#endif +#endif /* ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -3197,12 +3203,24 @@ BaseType_t xTaskIncrementTick( void ) TCB_t * pxTCB; TickType_t xItemValue; BaseType_t xSwitchRequired = pdFALSE; + BaseType_t uxSavedInterruptStatus; + + #if ( configUSE_PREEMPTION == 1 ) + UBaseType_t x; + BaseType_t xCoreYieldList[ configNUM_CORES ] = { pdFALSE }; + #endif /* configUSE_PREEMPTION */ + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); /* Called by the portable layer each time a tick interrupt occurs. * Increments the tick then checks to see if the new tick value will cause any * tasks to be unblocked. */ traceTASK_INCREMENT_TICK( xTickCount ); + /* Tick increment should occur on every kernel timer event. Core 0 has the + * responsibility to increment the tick, or increment the pended ticks if the + * scheduler is suspended. If pended ticks is greater than zero, the core that + * calls xTaskResumeAll has the responsibility to increment the tick. */ if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { /* Minor optimisation. The tick count cannot change in this @@ -3286,17 +3304,12 @@ BaseType_t xTaskIncrementTick( void ) * context switch if preemption is turned off. */ #if ( configUSE_PREEMPTION == 1 ) { - /* Preemption is on, but a context switch should - * only be performed if the unblocked task has a - * priority that is equal to or higher than the - * currently executing task. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) - { - xSwitchRequired = pdTRUE; - } - else + BaseType_t xYieldCoreID; + + xYieldCoreID = prvYieldForTask( pxTCB, pdTRUE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreID ) ) { - mtCOVERAGE_TEST_MARKER(); + xCoreYieldList[ xYieldCoreID ] = pdTRUE; } } #endif /* configUSE_PREEMPTION */ @@ -3309,13 +3322,27 @@ BaseType_t xTaskIncrementTick( void ) * writer has not explicitly turned time slicing off. */ #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) - { - xSwitchRequired = pdTRUE; - } - else + #if ( configNUM_CORES == 1 ) + TCB_t * pxCurrentTCBs[ 1 ] = { NULL }; + + pxCurrentTCBs[ 0 ] = pxCurrentTCB; + #endif /* ( configNUM_CORES == 1 ) */ + + /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not + * force a context switch that would just shuffle tasks around cores */ + /* TODO: There are certainly better ways of doing this that would reduce + * the number of interrupts and also potentially help prevent tasks from + * moving between cores as often. This, however, works for now. 
*/ + for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) { - mtCOVERAGE_TEST_MARKER(); + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xCoreYieldList[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ @@ -3337,14 +3364,45 @@ BaseType_t xTaskIncrementTick( void ) #if ( configUSE_PREEMPTION == 1 ) { - /* SMP_TODO : fix this in other commit. */ - if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) { - xSwitchRequired = pdTRUE; + if( xYieldPendings[ x ] != pdFALSE ) + { + xCoreYieldList[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + } + #endif /* configUSE_PREEMPTION */ + + #if ( configUSE_PREEMPTION == 1 ) + { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) { - mtCOVERAGE_TEST_MARKER(); + if( xCoreYieldList[ x ] != pdFALSE ) + { + if( x == xCoreID ) + { + xSwitchRequired = pdTRUE; + } + #if ( configNUM_CORES > 1 ) + else + { + prvYieldCore( x ); + } + #endif /* ( configNUM_CORES > 1 ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } #endif /* configUSE_PREEMPTION */ @@ -3362,6 +3420,8 @@ BaseType_t xTaskIncrementTick( void ) #endif } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + return xSwitchRequired; } /*-----------------------------------------------------------*/ From 16f266afffe5c7908af6beafe1b00a6dcc1adc71 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 29 Jun 2022 15:30:08 +0800 Subject: [PATCH 011/164] Update prvYieldForTask usage in kernel APIs --- tasks.c | 200 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 118 insertions(+), 82 deletions(-) diff --git a/tasks.c b/tasks.c index ce2a2a2a613..760b2a73dd9 100644 --- a/tasks.c +++ b/tasks.c @@ -2209,7 +2209,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) { - BaseType_t xYieldRequired; + BaseType_t xYieldRequired = pdFALSE; BaseType_t xYieldCoreID; TCB_t * const pxTCB = xTaskToResume; UBaseType_t uxSavedInterruptStatus; @@ -3168,20 +3168,23 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * switch if preemption is turned off. */ #if ( configUSE_PREEMPTION == 1 ) { - /* Preemption is on, but a context switch should only be - * performed if the unblocked task has a priority that is - * higher than the currently executing task. */ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - /* Pend the yield to be performed when the scheduler - * is unsuspended. */ - /* SMP_TODO : Fix this with other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; - } - else + taskENTER_CRITICAL(); { - mtCOVERAGE_TEST_MARKER(); + BaseType_t xYieldCoreID; + + xYieldCoreID = prvYieldForTask( pxTCB, pdFALSE, pdFALSE ); + + /* Preemption is on, but a context switch should only be + * performed if the unblocked task has a priority that is + * higher than the currently executing task. */ + if( taskVALID_CORE_ID( xYieldCoreID ) ) + { + /* Pend the yield to be performed when the scheduler + * is unsuspended. 
*/ + xYieldPendings[ xYieldCoreID ] = pdTRUE; + } } + taskEXIT_CRITICAL(); } #endif /* configUSE_PREEMPTION */ } @@ -3323,27 +3326,32 @@ BaseType_t xTaskIncrementTick( void ) #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { #if ( configNUM_CORES == 1 ) - TCB_t * pxCurrentTCBs[ 1 ] = { NULL }; - - pxCurrentTCBs[ 0 ] = pxCurrentTCB; - #endif /* ( configNUM_CORES == 1 ) */ - - /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not - * force a context switch that would just shuffle tasks around cores */ - /* TODO: There are certainly better ways of doing this that would reduce - * the number of interrupts and also potentially help prevent tasks from - * moving between cores as often. This, however, works for now. */ - for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { - xCoreYieldList[ x ] = pdTRUE; + xSwitchRequired = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } - } + #else + /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not + * force a context switch that would just shuffle tasks around cores */ + /* TODO: There are certainly better ways of doing this that would reduce + * the number of interrupts and also potentially help prevent tasks from + * moving between cores as often. This, however, works for now. */ + for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xCoreYieldList[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( configNUM_CORES == 1 ) */ } #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ @@ -3379,32 +3387,44 @@ BaseType_t xTaskIncrementTick( void ) #endif /* configUSE_PREEMPTION */ #if ( configUSE_PREEMPTION == 1 ) - { - BaseType_t xCoreID; - - xCoreID = portGET_CORE_ID(); - - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) - { - if( xCoreYieldList[ x ] != pdFALSE ) + #if ( configNUM_CORES == 1 ) { - if( x == xCoreID ) + /* For single core the core ID is always 0. */ + if( xCoreYieldList[ 0 ] != pdFALSE ) { xSwitchRequired = pdTRUE; } - #if ( configNUM_CORES > 1 ) + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else + { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + if( xCoreYieldList[ x ] != pdFALSE ) + { + if( x == ( UBaseType_t ) xCoreID ) + { + xSwitchRequired = pdTRUE; + } + else + { + prvYieldCore( x ); + } + } else { - prvYieldCore( x ); + mtCOVERAGE_TEST_MARKER(); } - #endif /* ( configNUM_CORES > 1 ) */ - } - else - { - mtCOVERAGE_TEST_MARKER(); + } } - } - } + #endif /* ( configNUM_CORES == 1 ) */ #endif /* configUSE_PREEMPTION */ } else @@ -3706,6 +3726,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) { TCB_t * pxUnblockedTCB; BaseType_t xReturn; + BaseType_t xYieldCoreID; /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be * called from a critical section within an ISR. 
*/ @@ -3750,7 +3771,9 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); } - if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + xYieldCoreID = prvYieldForTask( pxUnblockedTCB, pdFALSE, pdFALSE ); + + if( taskVALID_CORE_ID( xYieldCoreID ) ) { /* Return true if the task removed from the event list has a higher * priority than the calling task. This allows the calling task to know if @@ -3759,8 +3782,9 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) /* Mark that a yield is pending in case the user is not using the * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ - /* SMP_TODO : fix this with other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + #if ( configUSE_PREEMPTION == 1 ) + xYieldPendings[ xYieldCoreID ] = pdTRUE; + #endif } else { @@ -3775,6 +3799,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) { TCB_t * pxUnblockedTCB; + BaseType_t xYieldCoreID; /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by * the event flags implementation. */ @@ -3809,15 +3834,17 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); prvAddTaskToReadyList( pxUnblockedTCB ); - if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - /* The unblocked task has a priority above that of the calling task, so - * a context switch is required. This function is called with the - * scheduler suspended so xYieldPending is set so the context switch - * occurs immediately that the scheduler is resumed (unsuspended). */ - /* SMP_TODO : fix this with other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; - } + #if ( configUSE_PREEMPTION == 1 ) + taskENTER_CRITICAL(); + { + xYieldCoreID = prvYieldForTask( pxUnblockedTCB, pdFALSE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreID ) ) + { + xYieldPendings[ xYieldCoreID ] = pdTRUE; + } + } + taskEXIT_CRITICAL(); + #endif /* ( configUSE_PREEMPTION == 1 ) */ } /*-----------------------------------------------------------*/ @@ -3908,7 +3935,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, void vTaskMissedYield( void ) { - /* SMP_TODO : fix this with other commit. */ + /* Must be called from within a critical section */ xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } /*-----------------------------------------------------------*/ @@ -4663,14 +4690,26 @@ static void prvResetNextTaskUnblockTime( void ) return xReturn; } #else - /* SMP_TODO : Fix the interrupt macro in another commit. */ TaskHandle_t xTaskGetCurrentTaskHandle( void ) { TaskHandle_t xReturn; + UBaseType_t uxSavedInterruptStatus = 0; - portDISABLE_INTERRUPTS(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK(); xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; - portENABLE_INTERRUPTS(); + portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); + + return xReturn; + } + + TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) + { + TaskHandle_t xReturn = NULL; + + if( taskVALID_CORE_ID( xCoreID ) != pdFALSE ) + { + xReturn = pxCurrentTCBs[ xCoreID ]; + } return xReturn; } @@ -5741,16 +5780,11 @@ TickType_t uxTaskResetEventItemValue( void ) } #endif - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - /* The notified task has a priority above the currently - * executing task so a yield is required. 
*/ - taskYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* The notified task has a priority above the currently + * executing task so a yield is required. */ + #if ( configUSE_PREEMPTION == 1 ) + ( void ) prvYieldForTask( pxTCB, pdFALSE, pdTRUE ); + #endif /* ( configUSE_PREEMPTION == 1 ) */ } else { @@ -5778,6 +5812,7 @@ TickType_t uxTaskResetEventItemValue( void ) uint8_t ucOriginalNotifyState; BaseType_t xReturn = pdPASS; UBaseType_t uxSavedInterruptStatus; + BaseType_t xYieldCoreId; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -5876,10 +5911,9 @@ TickType_t uxTaskResetEventItemValue( void ) listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); } - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + xYieldCoreId = prvYieldForTask( pxTCB, pdFALSE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreId ) ) { - /* The notified task has a priority above the currently - * executing task so a yield is required. */ if( pxHigherPriorityTaskWoken != NULL ) { *pxHigherPriorityTaskWoken = pdTRUE; @@ -5888,8 +5922,9 @@ TickType_t uxTaskResetEventItemValue( void ) /* Mark that a yield is pending in case the user is not * using the "xHigherPriorityTaskWoken" parameter to an ISR * safe FreeRTOS function. */ - /* SMP_TODO : Fix this in other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + #if ( configUSE_PREEMPTION == 1 ) + xYieldPendings[ xYieldCoreId ] = pdTRUE; + #endif /* ( configUSE_PREEMPTION == 1 ) */ } else { @@ -5914,6 +5949,7 @@ TickType_t uxTaskResetEventItemValue( void ) TCB_t * pxTCB; uint8_t ucOriginalNotifyState; UBaseType_t uxSavedInterruptStatus; + BaseType_t xYieldCoreId; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -5968,10 +6004,9 @@ TickType_t uxTaskResetEventItemValue( void ) listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); } - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + xYieldCoreId = prvYieldForTask( pxTCB, pdFALSE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreId ) ) { - /* The notified task has a priority above the currently - * executing task so a yield is required. */ if( pxHigherPriorityTaskWoken != NULL ) { *pxHigherPriorityTaskWoken = pdTRUE; @@ -5980,8 +6015,9 @@ TickType_t uxTaskResetEventItemValue( void ) /* Mark that a yield is pending in case the user is not * using the "xHigherPriorityTaskWoken" parameter in an ISR * safe FreeRTOS function. */ - /* SMP_TODO : Fix this in other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + #if ( configUSE_PREEMPTION == 1 ) + xYieldPendings[ xYieldCoreId ] = pdTRUE; + #endif /* ( configUSE_PREEMPTION == 1 ) */ } else { From b58d52db423d3a37dfe499a7ecb5af42a7e6fd5b Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 4 Jul 2022 14:36:38 +0800 Subject: [PATCH 012/164] Merge prvAddNewTaskToReadyList from SMP --- tasks.c | 83 +++++++++++++++++++++++++++------------------------------ 1 file changed, 40 insertions(+), 43 deletions(-) diff --git a/tasks.c b/tasks.c index 760b2a73dd9..01c8d2c07f1 100644 --- a/tasks.c +++ b/tasks.c @@ -1352,17 +1352,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { uxCurrentNumberOfTasks++; - if( pxCurrentTCB == NULL ) + if( xSchedulerRunning == pdFALSE ) { - /* There are no other tasks, or all the other tasks are in - * the suspended state - make this the current task. */ - /* SMP_TODO : fix this in other PR. 
*/ - #if ( configNUM_CORES == 1 ) - pxCurrentTCB = pxNewTCB; - #else - pxCurrentTCBs[ portGET_CORE_ID() ] = pxNewTCB; - #endif - if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) { /* This is the first task to be created so do the preliminary @@ -1374,32 +1365,42 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { mtCOVERAGE_TEST_MARKER(); } - } - else - { - /* If the scheduler is not already running, make this task the - * current task if it is the highest priority task to be created - * so far. */ - if( xSchedulerRunning == pdFALSE ) - { - if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + + #if ( configNUM_CORES == 1 ) + if( pxCurrentTCB == NULL ) { - /* SMP_TODO : fix this in other PR. */ - #if ( configNUM_CORES == 1 ) - pxCurrentTCB = pxNewTCB; - #else - pxCurrentTCBs[ portGET_CORE_ID() ] = pxNewTCB; - #endif + /* There are no other tasks, or all the other tasks are in + * the suspended state - make this the current task. */ + pxCurrentTCB = pxNewTCB; } else { - mtCOVERAGE_TEST_MARKER(); + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + { + pxCurrentTCB = pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #else + if( pxNewTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + { + BaseType_t xCoreID; + + /* Check if a core is free. */ + for( xCoreID = ( UBaseType_t ) 0; xCoreID < ( UBaseType_t ) configNUM_CORES; xCoreID++ ) + { + if( pxCurrentTCBs[ xCoreID ] == NULL ) + { + pxNewTCB->xTaskRunState = xCoreID; + pxCurrentTCBs[ xCoreID ] = pxNewTCB; + break; + } + } + } + #endif } uxTaskNumber++; @@ -1415,26 +1416,22 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvAddTaskToReadyList( pxNewTCB ); portSETUP_TCB( pxNewTCB ); - } - taskEXIT_CRITICAL(); - if( xSchedulerRunning != pdFALSE ) - { - /* If the created task is of a higher priority than the current task - * then it should run now. */ - if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + if( xSchedulerRunning != pdFALSE ) { - taskYIELD_IF_USING_PREEMPTION(); + /* If the created task is of a higher priority than another + * currently running task and preemption is on then it should + * run now. 
*/ + #if ( configUSE_PREEMPTION == 1 ) + ( void ) prvYieldForTask( pxNewTCB, pdFALSE, pdTRUE ); + #endif } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + taskEXIT_CRITICAL(); } /*-----------------------------------------------------------*/ From 8a3f046c48382b94bb46365eb86c60d42aa81def Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 6 Jul 2022 15:07:46 +0800 Subject: [PATCH 013/164] Merge vTaskSwitchContext from SMP * Add vTaskSwitchContextForCore APIs to switch context for specific core * vTaskSwitchContext will switch context for current core --- tasks.c | 75 ++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/tasks.c b/tasks.c index 01c8d2c07f1..edce0e8a196 100644 --- a/tasks.c +++ b/tasks.c @@ -154,8 +154,6 @@ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ uxTopReadyPriority = uxTopPriority; \ } /* taskSELECT_HIGHEST_PRIORITY_TASK */ - #else - #define taskSELECT_HIGHEST_PRIORITY_TASK prvSelectHighestPriorityTask #endif /*-----------------------------------------------------------*/ @@ -473,7 +471,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, /* * Selects the highest priority available task */ -static BaseType_t prvSelectHighestPriorityTask( void ); +static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ); /** * Utility task that simply returns pdTRUE if the task referenced by xTask is @@ -840,27 +838,45 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ -/* SMP_TODO : This is a temporay implementation for compilation. - * Update this function in another commit. */ -#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) && ( configNUM_CORES > 1 ) - static BaseType_t prvSelectHighestPriorityTask( void ) +#if ( configNUM_CORES == 1 ) + static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) { - UBaseType_t uxTopPriority = uxTopReadyPriority; + BaseType_t xReturn = pdTRUE; - /* Find the highest priority queue that contains ready tasks. */ - while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) - { - configASSERT( uxTopPriority ); - --uxTopPriority; - } + /* xCoreID should always be 0 in single core. */ + configASSERT( xCoreID == 0 ); - /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of - * the same priority get an equal share of the processor time. */ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCBs[ portGET_CORE_ID() ], &( pxReadyTasksLists[ uxTopPriority ] ) ); - uxTopReadyPriority = uxTopPriority; + /* This function must be called after scheduler started. */ + configASSERT( xSchedulerRunning == pdTRUE ); + + taskSELECT_HIGHEST_PRIORITY_TASK(); + + return pdTRUE; } -#endif +#else + #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + /* SMP_TODO : This is a temporay implementation for compilation. + * Update this function in another commit. */ + static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) + { + UBaseType_t uxTopPriority = uxTopReadyPriority; + /* Find the highest priority queue that contains ready tasks. */ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) + { + configASSERT( uxTopPriority ); + --uxTopPriority; + } + + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of + * the same priority get an equal share of the processor time. 
*/ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCBs[ portGET_CORE_ID() ], &( pxReadyTasksLists[ uxTopPriority ] ) ); + uxTopReadyPriority = uxTopPriority; + + return pdTRUE; + } + #endif /* ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */ +#endif /* ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) @@ -3555,19 +3571,17 @@ BaseType_t xTaskIncrementTick( void ) #endif /* configUSE_APPLICATION_TASK_TAG */ /*-----------------------------------------------------------*/ -void vTaskSwitchContext( void ) +void vTaskSwitchContextForCore( BaseType_t xCoreID ) { if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) { /* The scheduler is currently suspended - do not allow a context * switch. */ - /* SMP_TODO : fix this with other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + xYieldPendings[ xCoreID ] = pdTRUE; } else { - /* SMP_TODO : fix this with other commit. */ - xYieldPendings[ portGET_CORE_ID() ] = pdFALSE; + xYieldPendings[ xCoreID ] = pdFALSE; traceTASK_SWITCHED_OUT(); #if ( configGENERATE_RUN_TIME_STATS == 1 ) @@ -3610,7 +3624,7 @@ void vTaskSwitchContext( void ) /* Select a new task to run using either the generic C or port * optimised asm code. */ - taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) prvSelectHighestPriorityTask( xCoreID ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ traceTASK_SWITCHED_IN(); /* After the new task is switched in, update the global errno. */ @@ -3631,6 +3645,17 @@ void vTaskSwitchContext( void ) #endif /* configUSE_NEWLIB_REENTRANT */ } } + +/*-----------------------------------------------------------*/ +void vTaskSwitchContext( void ) +{ + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + vTaskSwitchContextForCore( xCoreID ); +} + /*-----------------------------------------------------------*/ void vTaskPlaceOnEventList( List_t * const pxEventList, From 826fe0810fe7256ad6c19f0d6de806a039418710 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 6 Jul 2022 18:13:05 +0800 Subject: [PATCH 014/164] Merge vTaskDelete from SMP * Add prvYeildCore for single core to reduce multicore macros * Add taskTASK_IS_RUNNING for single core * Add taskTASK_IS_YIELDING --- tasks.c | 46 ++++++++++++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 16 deletions(-) diff --git a/tasks.c b/tasks.c index edce0e8a196..dcfdb5d3c71 100644 --- a/tasks.c +++ b/tasks.c @@ -264,7 +264,13 @@ #define taskTASK_YIELDING ( TaskRunning_t ) ( -2 ) /* Returns pdTRUE if the task is actively running and not scheduled to yield. */ -#define taskTASK_IS_RUNNING( xTaskRunState ) ( ( 0 <= xTaskRunState ) && ( xTaskRunState < configNUM_CORES ) ) +#if ( configNUM_CORES == 1 ) + #define taskTASK_IS_RUNNING( pxTCB ) ( pxTCB == pxCurrentTCB ) + #define taskTASK_IS_YIELDING( pxTCB ) ( pdFALSE ) +#else + #define taskTASK_IS_RUNNING( pxTCB ) ( ( 0 <= pxTCB->xTaskRunState ) && ( pxTCB->xTaskRunState < configNUM_CORES ) ) + #define taskTASK_IS_YIELDING( pxTCB ) ( pxTCB->xTaskRunState == taskTASK_YIELDING ) +#endif /* Indicates that the task is an Idle task. 
*/ #define taskATTRIBUTE_IS_IDLE ( BaseType_t ) ( 1UL << 0 ) @@ -714,7 +720,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUM_CORES == 1 ) + static void prvYieldCore( BaseType_t xCoreID ) + { + configASSERT( xCoreID == 0 ); + portYIELD_WITHIN_API(); + } +#else static void prvYieldCore( BaseType_t xCoreID ) { /* This must be called from a critical section and @@ -780,7 +792,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xTaskPriority; BaseType_t xLowestPriorityCore = ( ( BaseType_t ) -1 ); /* Negative value to indicate no yielding required. */ BaseType_t xCoreID; - TaskRunning_t xTaskRunState; /* This must be called from a critical section. */ configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); @@ -804,9 +815,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; xTaskPriority = xTaskPriority - 1; } - xTaskRunState = pxCurrentTCBs[ xCoreID ]->xTaskRunState; - - if( ( taskTASK_IS_RUNNING( xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) ) + if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) ) { if( xTaskPriority <= xLowestPriority ) { @@ -1456,6 +1465,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskDelete( TaskHandle_t xTaskToDelete ) { TCB_t * pxTCB; + TaskRunning_t xTaskRunningOnCore; taskENTER_CRITICAL(); { @@ -1463,6 +1473,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * being deleted. */ pxTCB = prvGetTCBFromHandle( xTaskToDelete ); + #if ( configNUM_CORES == 1 ) + xTaskRunningOnCore = ( TaskRunning_t ) 0; + #else + xTaskRunningOnCore = pxTCB->xTaskRunState; + #endif + /* Remove task from the ready/delayed list. */ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { @@ -1489,9 +1505,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * not return. */ uxTaskNumber++; - if( pxTCB == pxCurrentTCB ) + if( taskTASK_IS_RUNNING( pxTCB ) || taskTASK_IS_YIELDING( pxTCB ) ) { - /* A task is deleting itself. This cannot complete within the + /* A running task is being deleted. This cannot complete within the * task itself, as a context switch to another task is required. * Place the task in the termination list. The idle task will * check the termination list and free up any memory allocated by @@ -1512,9 +1528,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * after which it is not possible to yield away from this task - * hence xYieldPending is used to latch that a context switch is * required. */ - /* SMP_TODO : The task deleted not necessary running on the CPU. Fix - * this with pxTCB->xTaskRunState. */ - portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ portGET_CORE_ID() ] ); + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ xTaskRunningOnCore ] ); } else { @@ -1526,7 +1540,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvResetNextTaskUnblockTime(); } } - taskEXIT_CRITICAL(); /* If the task is not deleting itself, call prvDeleteTCB from outside of * critical section. If a task deletes itself, prvDeleteTCB is called @@ -1536,20 +1549,21 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvDeleteTCB( pxTCB ); } - /* Force a reschedule if it is the currently running task that has just - * been deleted. */ + /* Force a reschedule if the task that has just been deleted was running. 
*/ if( xSchedulerRunning != pdFALSE ) { - if( pxTCB == pxCurrentTCB ) + if( taskTASK_IS_RUNNING( pxTCB ) ) { configASSERT( uxSchedulerSuspended == 0 ); - portYIELD_WITHIN_API(); + prvYieldCore( xTaskRunningOnCore ); } else { mtCOVERAGE_TEST_MARKER(); } + } + taskEXIT_CRITICAL(); } #endif /* INCLUDE_vTaskDelete */ From e364cf94fa3e0de21601cd73b1bf11c8f6af12c7 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Fri, 8 Jul 2022 10:46:52 +0800 Subject: [PATCH 015/164] Merge vTaskSuspend from SMP --- tasks.c | 108 +++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 71 insertions(+), 37 deletions(-) diff --git a/tasks.c b/tasks.c index dcfdb5d3c71..c0968185d3e 100644 --- a/tasks.c +++ b/tasks.c @@ -731,8 +731,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; { /* This must be called from a critical section and * xCoreID must be valid. */ - configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); - configASSERT( taskVALID_CORE_ID( xCoreID ) ); if( portCHECK_IF_IN_ISR() && ( xCoreID == portGET_CORE_ID() ) ) { @@ -860,7 +858,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; taskSELECT_HIGHEST_PRIORITY_TASK(); - return pdTRUE; + return xReturn; } #else #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) @@ -2031,6 +2029,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskSuspend( TaskHandle_t xTaskToSuspend ) { TCB_t * pxTCB; + TaskRunning_t xTaskRunningOnCore; taskENTER_CRITICAL(); { @@ -2040,6 +2039,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) traceTASK_SUSPEND( pxTCB ); + #if ( configNUM_CORES == 1 ) + xTaskRunningOnCore = ( TaskRunning_t ) 0; + #else + xTaskRunningOnCore = pxTCB->xTaskRunState; + #endif + /* Remove task from the ready/delayed list and place in the * suspended list. */ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) @@ -2079,58 +2084,84 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ } - taskEXIT_CRITICAL(); if( xSchedulerRunning != pdFALSE ) { /* Reset the next expected unblock time in case it referred to the * task that is now in the Suspended state. */ - taskENTER_CRITICAL(); - { - prvResetNextTaskUnblockTime(); - } - taskEXIT_CRITICAL(); + prvResetNextTaskUnblockTime(); } else { mtCOVERAGE_TEST_MARKER(); } - if( pxTCB == pxCurrentTCB ) + if( taskTASK_IS_RUNNING( pxTCB ) ) { if( xSchedulerRunning != pdFALSE ) { - /* The current task has just been suspended. */ - configASSERT( uxSchedulerSuspended == 0 ); - portYIELD_WITHIN_API(); - } - else - { - /* The scheduler is not running, but the task that was pointed - * to by pxCurrentTCB has just been suspended and pxCurrentTCB - * must be adjusted to point to a different task. */ - if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + if( xTaskRunningOnCore == portGET_CORE_ID() ) { - /* No other tasks are ready, so set pxCurrentTCB back to - * NULL so when the next task is created pxCurrentTCB will - * be set to point to it no matter what its relative priority - * is. */ - /* SMP_TODO : fix this in other PR. */ - #if ( configNUM_CORES == 1 ) - pxCurrentTCB = NULL; - #else - pxCurrentTCBs[ portGET_CORE_ID() ] = NULL; - #endif + /* The current task has just been suspended. 
*/ + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); } else { - vTaskSwitchContext(); + prvYieldCore( xTaskRunningOnCore ); } + + taskEXIT_CRITICAL(); + } + else + { + taskEXIT_CRITICAL(); + #if ( configNUM_CORES == 1 ) + /* The scheduler is not running, but the task that was pointed + * to by pxCurrentTCB has just been suspended and pxCurrentTCB + * must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set pxCurrentTCB back to + * NULL so when the next task is created pxCurrentTCB will + * be set to point to it no matter what its relative priority + * is. */ + pxCurrentTCB = NULL; + } + else + { + vTaskSwitchContext(); + } + #else + /* The scheduler is not running, but the task that was pointed + * to by pxCurrentTCB has just been suspended and pxCurrentTCB + * must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set the core's TCB back to + * NULL so when the next task is created the core's TCB will + * be able to be set to point to it no matter what its relative + * priority is. */ + pxTCB->xTaskRunState = taskTASK_NOT_RUNNING; + pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; + } + else + { + /* Attempt to switch in a new task. This could fail since the idle tasks + * haven't been created yet. If it does then set the core's TCB back to + * NULL. */ + if( prvSelectHighestPriorityTask( xTaskRunningOnCore ) == pdFALSE ) + { + pxTCB->xTaskRunState = taskTASK_NOT_RUNNING; + pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; + } + } + #endif } } else { - mtCOVERAGE_TEST_MARKER(); + taskEXIT_CRITICAL(); } } @@ -5108,11 +5139,14 @@ static void prvResetNextTaskUnblockTime( void ) { portASSERT_IF_IN_ISR(); #if ( configNUM_CORES > 1 ) - /* The only time there would be a problem is if this is called - * before a context switch and vTaskExitCritical() is called - * after pxCurrentTCB changes. Therefore this should not be - * used within vTaskSwitchContext(). */ - prvCheckForRunStateChange(); + if( uxSchedulerSuspended == 0U ) + { + /* The only time there would be a problem is if this is called + * before a context switch and vTaskExitCritical() is called + * after pxCurrentTCB changes. Therefore this should not be + * used within vTaskSwitchContext(). 
*/ + prvCheckForRunStateChange(); + } #endif } } From ae5ca2a40429a04cfcd9b77afd74ba7be909f304 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 20 Jul 2022 15:43:09 +0800 Subject: [PATCH 016/164] Set minimal idle task idle attribute * Set minimal idle task idle attribute in prvInitialiseNewTask --- tasks.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tasks.c b/tasks.c index c0968185d3e..79bb27e808f 100644 --- a/tasks.c +++ b/tasks.c @@ -1348,6 +1348,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { pxNewTCB->xTaskAttribute = taskATTRIBUTE_IS_IDLE; } + else if( pxTaskCode == prvMinimalIdleTask ) + { + pxNewTCB->xTaskAttribute = taskATTRIBUTE_IS_IDLE; + } else { pxNewTCB->xTaskAttribute = 0; From a095cdcd97ddff612e597db8b67b9a5759d5dd47 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 3 Aug 2022 14:46:18 +0800 Subject: [PATCH 017/164] Move prvCreateIdleTasks forward and check return value --- tasks.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tasks.c b/tasks.c index 79bb27e808f..ed1634f6330 100644 --- a/tasks.c +++ b/tasks.c @@ -2488,14 +2488,21 @@ void vTaskStartScheduler( void ) { BaseType_t xReturn; + xReturn = prvCreateIdleTasks(); + #if ( configUSE_TIMERS == 1 ) { - xReturn = xTimerCreateTimerTask(); + if( xReturn == pdPASS ) + { + xReturn = xTimerCreateTimerTask(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } #endif /* configUSE_TIMERS */ - xReturn = prvCreateIdleTasks(); - if( xReturn == pdPASS ) { /* freertos_tasks_c_additions_init() should only be called if the user From e1d1104ad76c8a94f52c3df4504b56e5e8261032 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 3 Aug 2022 15:26:45 +0800 Subject: [PATCH 018/164] Add minimal idle hook config check --- include/FreeRTOS.h | 6 ++++++ tasks.c | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 17639bd8058..0a361f204b1 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -100,6 +100,12 @@ #error Missing definition: configUSE_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. #endif +#if ( configNUM_CORES > 1 ) + #ifndef configUSE_MINIMAL_IDLE_HOOK + #error Missing definition: configUSE_MINIMAL_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. + #endif +#endif + #ifndef configUSE_TICK_HOOK #error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. 
#endif diff --git a/tasks.c b/tasks.c index ed1634f6330..4a00229e103 100644 --- a/tasks.c +++ b/tasks.c @@ -4257,7 +4257,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) } #endif /* configUSE_TICKLESS_IDLE */ - #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) + #if ( configNUM_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) { extern void vApplicationMinimalIdleHook( void ); From 7718e91cc3edc3a2b862c319bf1bfa01329b02e8 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 20 Jul 2022 16:46:36 +0800 Subject: [PATCH 019/164] Fix xTaskResumeAll in SMP * xTaskRusmeAll do nothing when scheduler not running in SMP --- tasks.c | 171 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 88 insertions(+), 83 deletions(-) diff --git a/tasks.c b/tasks.c index 4a00229e103..c55bde4770e 100644 --- a/tasks.c +++ b/tasks.c @@ -2722,112 +2722,117 @@ BaseType_t xTaskResumeAll( void ) * previous call to vTaskSuspendAll(). */ configASSERT( uxSchedulerSuspended ); - /* It is possible that an ISR caused a task to be removed from an event - * list while the scheduler was suspended. If this was the case then the - * removed task will have been added to the xPendingReadyList. Once the - * scheduler has been resumed it is safe to move all the pending ready - * tasks from this list into their appropriate ready list. */ - taskENTER_CRITICAL(); + #if ( configNUM_CORES > 1 ) + if( xSchedulerRunning != pdFALSE ) + #endif { - BaseType_t xCoreID; + /* It is possible that an ISR caused a task to be removed from an event + * list while the scheduler was suspended. If this was the case then the + * removed task will have been added to the xPendingReadyList. Once the + * scheduler has been resumed it is safe to move all the pending ready + * tasks from this list into their appropriate ready list. */ + taskENTER_CRITICAL(); + { + BaseType_t xCoreID; - xCoreID = portGET_CORE_ID(); + xCoreID = portGET_CORE_ID(); - --uxSchedulerSuspended; - portRELEASE_TASK_LOCK(); + --uxSchedulerSuspended; + portRELEASE_TASK_LOCK(); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) - { - if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { - /* Move any readied tasks from the pending list into the - * appropriate ready list. */ - while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); - portMEMORY_BARRIER(); - listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxTCB ); - - /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. - * If the current core yielded then vTaskSwitchContext() has already been called - * which sets xYieldPendings for the current core to pdTRUE. */ - } + /* Move any readied tasks from the pending list into the + * appropriate ready list. */ + while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ + listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); + portMEMORY_BARRIER(); + listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); - if( pxTCB != NULL ) - { - /* A task was unblocked while the scheduler was suspended, - * which may have prevented the next unblock time from being - * re-calculated, in which case re-calculate it now. Mainly - * important for low power tickless implementations, where - * this can prevent an unnecessary exit from low power - * state. */ - prvResetNextTaskUnblockTime(); - } + /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. + * If the current core yielded then vTaskSwitchContext() has already been called + * which sets xYieldPendings for the current core to pdTRUE. */ + } - /* If any ticks occurred while the scheduler was suspended then - * they should be processed now. This ensures the tick count does - * not slip, and that any delayed tasks are resumed at the correct - * time. - * - * It should be safe to call xTaskIncrementTick here from any core - * since we are in a critical section and xTaskIncrementTick itself - * protects itself within a critical section. Suspending the scheduler - * from any core causes xTaskIncrementTick to increment uxPendedCounts. */ - { - TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */ + if( pxTCB != NULL ) + { + /* A task was unblocked while the scheduler was suspended, + * which may have prevented the next unblock time from being + * re-calculated, in which case re-calculate it now. Mainly + * important for low power tickless implementations, where + * this can prevent an unnecessary exit from low power + * state. */ + prvResetNextTaskUnblockTime(); + } - if( xPendedCounts > ( TickType_t ) 0U ) + /* If any ticks occurred while the scheduler was suspended then + * they should be processed now. This ensures the tick count does + * not slip, and that any delayed tasks are resumed at the correct + * time. + * + * It should be safe to call xTaskIncrementTick here from any core + * since we are in a critical section and xTaskIncrementTick itself + * protects itself within a critical section. Suspending the scheduler + * from any core causes xTaskIncrementTick to increment uxPendedCounts. */ { - do + TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */ + + if( xPendedCounts > ( TickType_t ) 0U ) { - if( xTaskIncrementTick() != pdFALSE ) - { - /* other cores are interrupted from - * within xTaskIncrementTick(). */ - xYieldPendings[ xCoreID ] = pdTRUE; - } - else + do { - mtCOVERAGE_TEST_MARKER(); - } + if( xTaskIncrementTick() != pdFALSE ) + { + /* other cores are interrupted from + * within xTaskIncrementTick(). 
*/ + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - --xPendedCounts; - } while( xPendedCounts > ( TickType_t ) 0U ); + --xPendedCounts; + } while( xPendedCounts > ( TickType_t ) 0U ); - xPendedTicks = 0; + xPendedTicks = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + + if( xYieldPendings[ xCoreID ] != pdFALSE ) { - mtCOVERAGE_TEST_MARKER(); - } - } + #if ( configUSE_PREEMPTION != 0 ) + { + xAlreadyYielded = pdTRUE; + } + #endif - if( xYieldPendings[ xCoreID ] != pdFALSE ) - { - #if ( configUSE_PREEMPTION != 0 ) + #if ( configNUM_CORES == 1 ) + taskYIELD_IF_USING_PREEMPTION(); + #endif /* ( configNUM_CORES == 1 ) */ + } + else { - xAlreadyYielded = pdTRUE; + mtCOVERAGE_TEST_MARKER(); } - #endif - - #if ( configNUM_CORES == 1 ) - taskYIELD_IF_USING_PREEMPTION(); - #endif /* ( configNUM_CORES == 1 ) */ - } - else - { - mtCOVERAGE_TEST_MARKER(); } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + taskEXIT_CRITICAL(); } - taskEXIT_CRITICAL(); return xAlreadyYielded; } From 0ef26d9b853e104bb1373da68ac1a85f8f4da90b Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 26 Jul 2022 10:44:51 +0800 Subject: [PATCH 020/164] check scheduler suspended when scheduler is running --- tasks.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tasks.c b/tasks.c index c55bde4770e..c802e745f70 100644 --- a/tasks.c +++ b/tasks.c @@ -2718,14 +2718,14 @@ BaseType_t xTaskResumeAll( void ) TCB_t * pxTCB = NULL; BaseType_t xAlreadyYielded = pdFALSE; - /* If uxSchedulerSuspended is zero then this function does not match a - * previous call to vTaskSuspendAll(). */ - configASSERT( uxSchedulerSuspended ); - #if ( configNUM_CORES > 1 ) if( xSchedulerRunning != pdFALSE ) #endif { + /* If uxSchedulerSuspended is zero then this function does not match a + * previous call to vTaskSuspendAll(). */ + configASSERT( uxSchedulerSuspended ); + /* It is possible that an ISR caused a task to be removed from an event * list while the scheduler was suspended. If this was the case then the * removed task will have been added to the xPendingReadyList. Once the From cc99f57b4288714c41796e874a5e95b708048f6d Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 27 Jul 2022 19:17:21 +0800 Subject: [PATCH 021/164] Move suspend scheduler inside critical section --- tasks.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tasks.c b/tasks.c index c802e745f70..39dffbaef0a 100644 --- a/tasks.c +++ b/tasks.c @@ -2722,10 +2722,6 @@ BaseType_t xTaskResumeAll( void ) if( xSchedulerRunning != pdFALSE ) #endif { - /* If uxSchedulerSuspended is zero then this function does not match a - * previous call to vTaskSuspendAll(). */ - configASSERT( uxSchedulerSuspended ); - /* It is possible that an ISR caused a task to be removed from an event * list while the scheduler was suspended. If this was the case then the * removed task will have been added to the xPendingReadyList. Once the @@ -2737,6 +2733,10 @@ BaseType_t xTaskResumeAll( void ) xCoreID = portGET_CORE_ID(); + /* If uxSchedulerSuspended is zero then this function does not match a + * previous call to vTaskSuspendAll(). 
*/ + configASSERT( uxSchedulerSuspended ); + --uxSchedulerSuspended; portRELEASE_TASK_LOCK(); From 4a601d310efc6be5b9eda24c8334ac13d8466ff3 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 3 Aug 2022 09:08:12 +0800 Subject: [PATCH 022/164] Update comment for uxSchedulerSuspended --- tasks.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 39dffbaef0a..95a0e0bbf91 100644 --- a/tasks.c +++ b/tasks.c @@ -432,7 +432,11 @@ const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U; * moves the task's event list item into the xPendingReadyList, ready for the * kernel to move the task from the pending ready list into the real ready list * when the scheduler is unsuspended. The pending ready list itself can only be - * accessed from a critical section. */ + * accessed from a critical section. + * + * Updates to uxSchedulerSuspended must be protected by both the task and ISR locks and + * must not be done by an ISR. Reads must be protected by either lock and may be done by + * either an ISR or a task. */ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; #if ( configGENERATE_RUN_TIME_STATS == 1 ) From ea47a923c977682f637bf41a9f53ac462df72410 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 3 Aug 2022 14:57:31 +0800 Subject: [PATCH 023/164] Add back xPendingReadyList for single core --- tasks.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/tasks.c b/tasks.c index 95a0e0bbf91..67ea4d48b6c 100644 --- a/tasks.c +++ b/tasks.c @@ -2758,9 +2758,22 @@ BaseType_t xTaskResumeAll( void ) listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); - /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. - * If the current core yielded then vTaskSwitchContext() has already been called - * which sets xYieldPendings for the current core to pdTRUE. */ + #if ( configNUM_CORES == 1 ) + /* If the moved task has a priority higher than the current + * task then a yield must be performed. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #else + /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. + * If the current core yielded then vTaskSwitchContext() has already been called + * which sets xYieldPendings for the current core to pdTRUE. */ + #endif } if( pxTCB != NULL ) From 64f41953fab47c7b778ed34a7a485068ec09a194 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 3 Aug 2022 15:10:17 +0800 Subject: [PATCH 024/164] Use critical section for SMP --- tasks.c | 345 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 174 insertions(+), 171 deletions(-) diff --git a/tasks.c b/tasks.c index 67ea4d48b6c..b0a3fc37cd5 100644 --- a/tasks.c +++ b/tasks.c @@ -3297,241 +3297,244 @@ BaseType_t xTaskIncrementTick( void ) TCB_t * pxTCB; TickType_t xItemValue; BaseType_t xSwitchRequired = pdFALSE; - BaseType_t uxSavedInterruptStatus; #if ( configUSE_PREEMPTION == 1 ) UBaseType_t x; BaseType_t xCoreYieldList[ configNUM_CORES ] = { pdFALSE }; #endif /* configUSE_PREEMPTION */ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - - /* Called by the portable layer each time a tick interrupt occurs. - * Increments the tick then checks to see if the new tick value will cause any - * tasks to be unblocked. 
*/ - traceTASK_INCREMENT_TICK( xTickCount ); - - /* Tick increment should occur on every kernel timer event. Core 0 has the - * responsibility to increment the tick, or increment the pended ticks if the - * scheduler is suspended. If pended ticks is greater than zero, the core that - * calls xTaskResumeAll has the responsibility to increment the tick. */ - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + #if ( configNUM_CORES > 1 ) + taskENTER_CRITICAL(); + #endif { - /* Minor optimisation. The tick count cannot change in this - * block. */ - const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; - - /* Increment the RTOS tick, switching the delayed and overflowed - * delayed lists if it wraps to 0. */ - xTickCount = xConstTickCount; + /* Called by the portable layer each time a tick interrupt occurs. + * Increments the tick then checks to see if the new tick value will cause any + * tasks to be unblocked. */ + traceTASK_INCREMENT_TICK( xTickCount ); - if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ - { - taskSWITCH_DELAYED_LISTS(); - } - else + /* Tick increment should occur on every kernel timer event. Core 0 has the + * responsibility to increment the tick, or increment the pended ticks if the + * scheduler is suspended. If pended ticks is greater than zero, the core that + * calls xTaskResumeAll has the responsibility to increment the tick. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { - mtCOVERAGE_TEST_MARKER(); - } + /* Minor optimisation. The tick count cannot change in this + * block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; - /* See if this tick has made a timeout expire. Tasks are stored in - * the queue in the order of their wake time - meaning once one task - * has been found whose block time has not expired there is no need to - * look any further down the list. */ - if( xConstTickCount >= xNextTaskUnblockTime ) - { - for( ; ; ) + /* Increment the RTOS tick, switching the delayed and overflowed + * delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; + + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ { - if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) - { - /* The delayed list is empty. Set xNextTaskUnblockTime - * to the maximum possible value so it is extremely - * unlikely that the - * if( xTickCount >= xNextTaskUnblockTime ) test will pass - * next time through. */ - xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - break; - } - else + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* See if this tick has made a timeout expire. Tasks are stored in + * the queue in the order of their wake time - meaning once one task + * has been found whose block time has not expired there is no need to + * look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ; ; ) { - /* The delayed list is not empty, get the value of the - * item at the head of the delayed list. This is the time - * at which the task at the head of the delayed list must - * be removed from the Blocked state. */ - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. 
Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); - - if( xConstTickCount < xItemValue ) + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) { - /* It is not time to unblock this item yet, but the - * item value is the time at which the task at the head - * of the blocked list must be removed from the Blocked - * state - so record the item value in - * xNextTaskUnblockTime. */ - xNextTaskUnblockTime = xItemValue; - break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ + /* The delayed list is empty. Set xNextTaskUnblockTime + * to the maximum possible value so it is extremely + * unlikely that the + * if( xTickCount >= xNextTaskUnblockTime ) test will pass + * next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; } else { - mtCOVERAGE_TEST_MARKER(); - } - - /* It is time to remove the item from the Blocked state. */ - listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); + /* The delayed list is not empty, get the value of the + * item at the head of the delayed list. This is the time + * at which the task at the head of the delayed list must + * be removed from the Blocked state. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + + if( xConstTickCount < xItemValue ) + { + /* It is not time to unblock this item yet, but the + * item value is the time at which the task at the head + * of the blocked list must be removed from the Blocked + * state - so record the item value in + * xNextTaskUnblockTime. */ + xNextTaskUnblockTime = xItemValue; + break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - /* Is the task waiting on an event also? If so remove - * it from the event list. */ - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) - { - listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* It is time to remove the item from the Blocked state. */ + listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); - /* Place the unblocked task into the appropriate ready - * list. */ - prvAddTaskToReadyList( pxTCB ); + /* Is the task waiting on an event also? If so remove + * it from the event list. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - /* A task being unblocked cannot cause an immediate - * context switch if preemption is turned off. */ - #if ( configUSE_PREEMPTION == 1 ) - { - BaseType_t xYieldCoreID; + /* Place the unblocked task into the appropriate ready + * list. */ + prvAddTaskToReadyList( pxTCB ); - xYieldCoreID = prvYieldForTask( pxTCB, pdTRUE, pdFALSE ); - if( taskVALID_CORE_ID( xYieldCoreID ) ) + /* A task being unblocked cannot cause an immediate + * context switch if preemption is turned off. 
*/ + #if ( configUSE_PREEMPTION == 1 ) { - xCoreYieldList[ xYieldCoreID ] = pdTRUE; + BaseType_t xYieldCoreID; + + xYieldCoreID = prvYieldForTask( pxTCB, pdTRUE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreID ) ) + { + xCoreYieldList[ xYieldCoreID ] = pdTRUE; + } } + #endif /* configUSE_PREEMPTION */ } - #endif /* configUSE_PREEMPTION */ } } - } - /* Tasks of equal priority to the currently running task will share - * processing time (time slice) if preemption is on, and the application - * writer has not explicitly turned time slicing off. */ - #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) - { - #if ( configNUM_CORES == 1 ) - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - #else - /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not - * force a context switch that would just shuffle tasks around cores */ - /* TODO: There are certainly better ways of doing this that would reduce - * the number of interrupts and also potentially help prevent tasks from - * moving between cores as often. This, however, works for now. */ - for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + /* Tasks of equal priority to the currently running task will share + * processing time (time slice) if preemption is on, and the application + * writer has not explicitly turned time slicing off. */ + #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) + { + #if ( configNUM_CORES == 1 ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { - xCoreYieldList[ x ] = pdTRUE; + xSwitchRequired = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } - } - #endif /* ( configNUM_CORES == 1 ) */ - } - #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ - - #if ( configUSE_TICK_HOOK == 1 ) - { - /* Guard against the tick hook being called when the pended tick - * count is being unwound (when the scheduler is being unlocked). */ - if( xPendedTicks == ( TickType_t ) 0 ) - { - vApplicationTickHook(); - } - else - { - mtCOVERAGE_TEST_MARKER(); + #else + /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not + * force a context switch that would just shuffle tasks around cores */ + /* TODO: There are certainly better ways of doing this that would reduce + * the number of interrupts and also potentially help prevent tasks from + * moving between cores as often. This, however, works for now. */ + for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xCoreYieldList[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( configNUM_CORES == 1 ) */ } - } - #endif /* configUSE_TICK_HOOK */ + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ - #if ( configUSE_PREEMPTION == 1 ) - { - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + #if ( configUSE_TICK_HOOK == 1 ) { - if( xYieldPendings[ x ] != pdFALSE ) + /* Guard against the tick hook being called when the pended tick + * count is being unwound (when the scheduler is being unlocked). 
*/ + if( xPendedTicks == ( TickType_t ) 0 ) { - xCoreYieldList[ x ] = pdTRUE; + vApplicationTickHook(); } else { mtCOVERAGE_TEST_MARKER(); } } - } - #endif /* configUSE_PREEMPTION */ + #endif /* configUSE_TICK_HOOK */ - #if ( configUSE_PREEMPTION == 1 ) - #if ( configNUM_CORES == 1 ) + #if ( configUSE_PREEMPTION == 1 ) + { + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) { - /* For single core the core ID is always 0. */ - if( xCoreYieldList[ 0 ] != pdFALSE ) + if( xYieldPendings[ x ] != pdFALSE ) { - xSwitchRequired = pdTRUE; + xCoreYieldList[ x ] = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } - #else - { - BaseType_t xCoreID; - - xCoreID = portGET_CORE_ID(); + } + #endif /* configUSE_PREEMPTION */ - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + #if ( configUSE_PREEMPTION == 1 ) + #if ( configNUM_CORES == 1 ) { - if( xCoreYieldList[ x ] != pdFALSE ) + /* For single core the core ID is always 0. */ + if( xCoreYieldList[ 0 ] != pdFALSE ) { - if( x == ( UBaseType_t ) xCoreID ) - { - xSwitchRequired = pdTRUE; - } - else - { - prvYieldCore( x ); - } + xSwitchRequired = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } - } - #endif /* ( configNUM_CORES == 1 ) */ - #endif /* configUSE_PREEMPTION */ - } - else - { - ++xPendedTicks; + #else + { + BaseType_t xCoreID; - /* The tick hook gets called at regular intervals, even if the - * scheduler is locked. */ - #if ( configUSE_TICK_HOOK == 1 ) + xCoreID = portGET_CORE_ID(); + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + if( xCoreYieldList[ x ] != pdFALSE ) + { + if( x == ( UBaseType_t ) xCoreID ) + { + xSwitchRequired = pdTRUE; + } + else + { + prvYieldCore( x ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #endif /* ( configNUM_CORES == 1 ) */ + #endif /* configUSE_PREEMPTION */ + } + else { - vApplicationTickHook(); + ++xPendedTicks; + + /* The tick hook gets called at regular intervals, even if the + * scheduler is locked. */ + #if ( configUSE_TICK_HOOK == 1 ) + { + vApplicationTickHook(); + } + #endif } - #endif } - - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #if ( configNUM_CORES > 1 ) + taskEXIT_CRITICAL(); + #endif return xSwitchRequired; } From f6faebb087a9cc8f1e653ec4e0894eb113c0190e Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 26 Jul 2022 10:51:02 +0800 Subject: [PATCH 025/164] Add in ISR check in prvCheckForRunStateChange function --- tasks.c | 101 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 51 insertions(+), 50 deletions(-) diff --git a/tasks.c b/tasks.c index b0a3fc37cd5..40511077288 100644 --- a/tasks.c +++ b/tasks.c @@ -658,66 +658,67 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; * core is no longer running, then vTaskSwitchContext() probably should * be run before returning, but we don't have a way to force that to happen * from here. */ - configASSERT( portCHECK_IF_IN_ISR() ); - - /* This function is always called with interrupts disabled - * so this is safe. */ - pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ]; - - while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) + if( portCHECK_IF_IN_ISR() == pdFALSE ) { - /* We are only here if we just entered a critical section - * or if we just suspended the scheduler, and another task - * has requested that we yield. - * - * This is slightly complicated since we need to save and restore - * the suspension and critical nesting counts, as well as release - * and reacquire the correct locks. 
And then do it all over again - * if our state changed again during the reacquisition. */ + /* This function is always called with interrupts disabled + * so this is safe. */ + pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ]; - uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; - uxPrevSchedulerSuspended = uxSchedulerSuspended; + while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) + { + /* We are only here if we just entered a critical section + * or if we just suspended the scheduler, and another task + * has requested that we yield. + * + * This is slightly complicated since we need to save and restore + * the suspension and critical nesting counts, as well as release + * and reacquire the correct locks. And then do it all over again + * if our state changed again during the reacquisition. */ - /* this must only be called the first time we enter into a critical - * section, otherwise it could context switch in the middle of a - * critical section. */ - configASSERT( uxPrevCriticalNesting + uxPrevSchedulerSuspended == 1U ); + uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; + uxPrevSchedulerSuspended = uxSchedulerSuspended; - uxSchedulerSuspended = 0U; + /* this must only be called the first time we enter into a critical + * section, otherwise it could context switch in the middle of a + * critical section. */ + configASSERT( uxPrevCriticalNesting + uxPrevSchedulerSuspended == 1U ); - if( uxPrevCriticalNesting > 0U ) - { - pxThisTCB->uxCriticalNesting = 0U; - portRELEASE_ISR_LOCK(); - portRELEASE_TASK_LOCK(); - } - else - { - /* uxPrevSchedulerSuspended must be 1 */ - portRELEASE_TASK_LOCK(); - } + uxSchedulerSuspended = 0U; - portMEMORY_BARRIER(); - configASSERT( pxThisTCB->xTaskRunState == taskTASK_YIELDING ); + if( uxPrevCriticalNesting > 0U ) + { + pxThisTCB->uxCriticalNesting = 0U; + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + } + else + { + /* uxPrevSchedulerSuspended must be 1 */ + portRELEASE_TASK_LOCK(); + } - portENABLE_INTERRUPTS(); + portMEMORY_BARRIER(); + configASSERT( pxThisTCB->xTaskRunState == taskTASK_YIELDING ); - /* Enabling interrupts should cause this core to immediately - * service the pending interrupt and yield. If the run state is still - * yielding here then that is a problem. */ - configASSERT( pxThisTCB->xTaskRunState != taskTASK_YIELDING ); + portENABLE_INTERRUPTS(); - portDISABLE_INTERRUPTS(); - portGET_TASK_LOCK(); - portGET_ISR_LOCK(); - pxCurrentTCB->uxCriticalNesting = uxPrevCriticalNesting; - uxSchedulerSuspended = uxPrevSchedulerSuspended; + /* Enabling interrupts should cause this core to immediately + * service the pending interrupt and yield. If the run state is still + * yielding here then that is a problem. 
*/ + configASSERT( pxThisTCB->xTaskRunState != taskTASK_YIELDING ); - if( uxPrevCriticalNesting == 0U ) - { - /* uxPrevSchedulerSuspended must be 1 */ - configASSERT( uxPrevSchedulerSuspended != ( UBaseType_t ) pdFALSE ); - portRELEASE_ISR_LOCK(); + portDISABLE_INTERRUPTS(); + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + pxCurrentTCB->uxCriticalNesting = uxPrevCriticalNesting; + uxSchedulerSuspended = uxPrevSchedulerSuspended; + + if( uxPrevCriticalNesting == 0U ) + { + /* uxPrevSchedulerSuspended must be 1 */ + configASSERT( uxPrevSchedulerSuspended != ( UBaseType_t ) pdFALSE ); + portRELEASE_ISR_LOCK(); + } } } } From 5ccb1646a554e189791c2b7cd5d45da0a93ffd97 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 26 Jul 2022 10:00:21 +0800 Subject: [PATCH 026/164] Add critical section protect for context switch --- tasks.c | 134 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 76 insertions(+), 58 deletions(-) diff --git a/tasks.c b/tasks.c index 40511077288..7791fc6fe33 100644 --- a/tasks.c +++ b/tasks.c @@ -3655,77 +3655,95 @@ BaseType_t xTaskIncrementTick( void ) void vTaskSwitchContextForCore( BaseType_t xCoreID ) { - if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + /* Acquire both locks: + * - The ISR lock protects the ready list from simultaneous access by + * both other ISRs and tasks. + * - We also take the task lock to pause here in case another core has + * suspended the scheduler. We don't want to simply set xYieldPending + * and move on if another core suspended the scheduler. We should only + * do that if the current core has suspended the scheduler. */ + + portGET_TASK_LOCK(); /* Must always acquire the task lock first */ + portGET_ISR_LOCK(); { - /* The scheduler is currently suspended - do not allow a context - * switch. */ - xYieldPendings[ xCoreID ] = pdTRUE; - } - else - { - xYieldPendings[ xCoreID ] = pdFALSE; - traceTASK_SWITCHED_OUT(); + /* vTaskSwitchContext() must never be called from within a critical section. + * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. */ + configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); - #if ( configGENERATE_RUN_TIME_STATS == 1 ) + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) { - #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); - #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); - #endif + /* The scheduler is currently suspended - do not allow a context + * switch. */ + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + xYieldPendings[ xCoreID ] = pdFALSE; + traceTASK_SWITCHED_OUT(); - /* Add the amount of time the task has been running to the - * accumulated time so far. The time the task started running was - * stored in ulTaskSwitchedInTime. Note that there is no overflow - * protection here so count values are only valid until the timer - * overflows. The guard against negative values is to protect - * against suspect run time stat counter implementations - which - * are provided by the application, not the kernel. 
*/ - if( ulTotalRunTime > ulTaskSwitchedInTime ) - { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); - } - else + #if ( configGENERATE_RUN_TIME_STATS == 1 ) { - mtCOVERAGE_TEST_MARKER(); - } + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif - ulTaskSwitchedInTime = ulTotalRunTime; - } - #endif /* configGENERATE_RUN_TIME_STATS */ + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - /* Check for stack overflow, if configured. */ - taskCHECK_FOR_STACK_OVERFLOW(); + ulTaskSwitchedInTime = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ - /* Before the currently running task is switched out, save its errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - pxCurrentTCB->iTaskErrno = FreeRTOS_errno; - } - #endif + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); - /* Select a new task to run using either the generic C or port - * optimised asm code. */ - ( void ) prvSelectHighestPriorityTask( xCoreID ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - traceTASK_SWITCHED_IN(); + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + } + #endif - /* After the new task is switched in, update the global errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - FreeRTOS_errno = pxCurrentTCB->iTaskErrno; - } - #endif + /* Select a new task to run using either the generic C or port + * optimised asm code. */ + ( void ) prvSelectHighestPriorityTask( xCoreID ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + traceTASK_SWITCHED_IN(); - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - { - /* Switch Newlib's _impure_ptr variable to point to the _reent - * structure specific to this task. - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + /* After the new task is switched in, update the global errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + * structure specific to this task. + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. 
*/ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ } - #endif /* configUSE_NEWLIB_REENTRANT */ } + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); } /*-----------------------------------------------------------*/ From 431ff1df58d158fe5956fdb12273b32969aa9d07 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 26 Jul 2022 11:10:55 +0800 Subject: [PATCH 027/164] Add vTaskSwitchContextForCore declaration --- include/task.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/task.h b/include/task.h index c3acac51244..2db17f547c0 100644 --- a/include/task.h +++ b/include/task.h @@ -3015,6 +3015,16 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, */ portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY + * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS + * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * Sets the pointer to the current TCB to the TCB of the highest priority task + * that is ready to run for core. + */ +portDONT_DISCARD void vTaskSwitchContextForCore( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; + /* * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY * THE EVENT BITS MODULE. From 28fc702255de1afea12e35d3d4bc391d54456ae3 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 26 Jul 2022 11:43:18 +0800 Subject: [PATCH 028/164] Fix missing macro and check for single core --- include/FreeRTOS.h | 30 ++++++++++++++++++++++++++++++ tasks.c | 6 ++++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 0a361f204b1..0ff9e1d67fe 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -349,6 +349,36 @@ #endif /* portRELEASE_TASK_LOCK */ +#ifndef portGET_TASK_LOCK + + #if ( configNUM_CORES == 1 ) + #define portGET_TASK_LOCK() + #else + #error portGET_TASK_LOCK is required in SMP + #endif + +#endif /* portGET_TASK_LOCK */ + +#ifndef portRELEASE_ISR_LOCK + + #if ( configNUM_CORES == 1 ) + #define portRELEASE_ISR_LOCK() + #else + #error portRELEASE_ISR_LOCK is required in SMP + #endif + +#endif /* portRELEASE_ISR_LOCK */ + +#ifndef portGET_ISR_LOCK + + #if ( configNUM_CORES == 1 ) + #define portGET_ISR_LOCK() + #else + #error portGET_ISR_LOCK is required in SMP + #endif + +#endif /* portGET_ISR_LOCK */ + /* The timers module relies on xTaskGetSchedulerState(). */ #if configUSE_TIMERS == 1 diff --git a/tasks.c b/tasks.c index 7791fc6fe33..8d2f8e4e558 100644 --- a/tasks.c +++ b/tasks.c @@ -3666,9 +3666,11 @@ void vTaskSwitchContextForCore( BaseType_t xCoreID ) portGET_TASK_LOCK(); /* Must always acquire the task lock first */ portGET_ISR_LOCK(); { - /* vTaskSwitchContext() must never be called from within a critical section. + /* vTaskSwitchContextForCore() must never be called from within a critical section. * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. 
*/ - configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); + #endif if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) { From ae523524e5aeb4f67b052500b65c8f9e0948e621 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 1 Aug 2022 12:51:34 +0800 Subject: [PATCH 029/164] Fix task delete condition * Latest kernel move out the prvDeleteTask and the check condition should be TASK_IS_RUNNING --- tasks.c | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/tasks.c b/tasks.c index 8d2f8e4e558..9f978261489 100644 --- a/tasks.c +++ b/tasks.c @@ -1512,6 +1512,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * not return. */ uxTaskNumber++; + /* If the task is running (or yielding), we must add it to the + * termination list so that an idle task can delete it when it is + * no longer running. */ if( taskTASK_IS_RUNNING( pxTCB ) || taskTASK_IS_YIELDING( pxTCB ) ) { /* A running task is being deleted. This cannot complete within the @@ -1547,30 +1550,38 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvResetNextTaskUnblockTime(); } } + taskEXIT_CRITICAL(); /* If the task is not deleting itself, call prvDeleteTCB from outside of * critical section. If a task deletes itself, prvDeleteTCB is called * from prvCheckTasksWaitingTermination which is called from Idle task. */ - if( pxTCB != pxCurrentTCB ) + if( ( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) && ( taskTASK_IS_YIELDING( pxTCB ) == pdFALSE ) ) { prvDeleteTCB( pxTCB ); } /* Force a reschedule if the task that has just been deleted was running. */ - if( xSchedulerRunning != pdFALSE ) + if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) ) ) { - if( taskTASK_IS_RUNNING( pxTCB ) ) - { + #if ( configNUM_CORES == 1 ) configASSERT( uxSchedulerSuspended == 0 ); - prvYieldCore( xTaskRunningOnCore ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - + portYIELD_WITHIN_API(); + #else + taskENTER_CRITICAL(); + { + if( xTaskRunningOnCore == portGET_CORE_ID() ) + { + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( xTaskRunningOnCore ); + } + } + taskEXIT_CRITICAL(); + #endif } - taskEXIT_CRITICAL(); } #endif /* INCLUDE_vTaskDelete */ From fdbd9357ea4a98ae9d3d72a9e6a9b29c95074d3f Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Sat, 6 Aug 2022 08:24:43 +0800 Subject: [PATCH 030/164] Use critical section to protect more in SMP for vTaskDelete * The condition task is running is not thread safe in SMP * Once we add the task to termination the task is still running and may add it back to other list. Which cause memory corruption. --- tasks.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tasks.c b/tasks.c index 9f978261489..c3dfa4a9288 100644 --- a/tasks.c +++ b/tasks.c @@ -1550,7 +1550,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvResetNextTaskUnblockTime(); } } - taskEXIT_CRITICAL(); + #if ( configNUM_CORES == 1 ) + taskEXIT_CRITICAL(); + #endif /* If the task is not deleting itself, call prvDeleteTCB from outside of * critical section. 
If a task deletes itself, prvDeleteTCB is called @@ -1567,7 +1569,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( uxSchedulerSuspended == 0 ); portYIELD_WITHIN_API(); #else - taskENTER_CRITICAL(); { if( xTaskRunningOnCore == portGET_CORE_ID() ) { @@ -1579,9 +1580,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvYieldCore( xTaskRunningOnCore ); } } - taskEXIT_CRITICAL(); #endif } + + #if ( configNUM_CORES > 1 ) + taskEXIT_CRITICAL(); + #endif } #endif /* INCLUDE_vTaskDelete */ @@ -2124,7 +2128,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { /* The current task has just been suspended. */ configASSERT( uxSchedulerSuspended == 0 ); - vTaskYieldWithinAPI(); + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + vTaskYieldWithinAPI(); + #else + portYIELD_WITHIN_API(); + #endif } else { From 72228c353855276fbd455fef0c3eb7fc986ef4b2 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 11 Jul 2022 17:41:05 +0800 Subject: [PATCH 031/164] Merge SMP prvSelectHighestPriorityTask to main --- tasks.c | 121 +++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 97 insertions(+), 24 deletions(-) diff --git a/tasks.c b/tasks.c index c3dfa4a9288..b4aa4b3559a 100644 --- a/tasks.c +++ b/tasks.c @@ -458,11 +458,13 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t */ static BaseType_t prvCreateIdleTasks( void ); -/* - * Checks to see if another task moved the current task out of the ready - * list while it was waiting to enter a critical section and yields if so. - */ -static void prvCheckForRunStateChange( void ); +#if ( configNUM_CORES > 1 ) + /* + * Checks to see if another task moved the current task out of the ready + * list while it was waiting to enter a critical section and yields if so. + */ + static void prvCheckForRunStateChange( void ); +#endif /* ( configNUM_CORES > 1 ) */ /* * Yields the given core. @@ -850,8 +852,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) - static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) +static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) +{ + #if ( configNUM_CORES == 1 ) { BaseType_t xReturn = pdTRUE; @@ -865,30 +868,100 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; return xReturn; } -#else - #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) - /* SMP_TODO : This is a temporay implementation for compilation. - * Update this function in another commit. */ - static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) + #else + { + UBaseType_t uxCurrentPriority = uxTopReadyPriority; + BaseType_t xTaskScheduled = pdFALSE; + BaseType_t xDecrementTopPriority = pdTRUE; + + while( xTaskScheduled == pdFALSE ) { - UBaseType_t uxTopPriority = uxTopReadyPriority; + if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) + { + List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] ); + ListItem_t * pxLastTaskItem = pxReadyList->pxIndex->pxPrevious; + ListItem_t * pxTaskItem = pxLastTaskItem; + + if( ( void * ) pxLastTaskItem == ( void * ) &( pxReadyList->xListEnd ) ) + { + pxLastTaskItem = pxLastTaskItem->pxPrevious; + } + + /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority + * must not be decremented any further. 
*/ + xDecrementTopPriority = pdFALSE; + + do + { + TCB_t * pxTCB; + + pxTaskItem = pxTaskItem->pxNext; + + if( ( void * ) pxTaskItem == ( void * ) &( pxReadyList->xListEnd ) ) + { + pxTaskItem = pxTaskItem->pxNext; + } + + pxTCB = pxTaskItem->pvOwner; + + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) + { + /* If the task is not being executed by any core swap it in. */ + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING; + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + pxCurrentTCBs[ xCoreID ] = pxTCB; + xTaskScheduled = pdTRUE; + } + else if( pxTCB == pxCurrentTCBs[ xCoreID ] ) + { + configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); + + /* The task is already running on this core, mark it as scheduled. */ + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + xTaskScheduled = pdTRUE; + } + else + { + /* This task is running on the core other than xCoreID. */ + mtCOVERAGE_TEST_MARKER(); + } - /* Find the highest priority queue that contains ready tasks. */ - while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) + if( xTaskScheduled != pdFALSE ) + { + /* Once a task has been selected to run on this core, + * move it to the end of the ready task list. */ + uxListRemove( pxTaskItem ); + vListInsertEnd( pxReadyList, pxTaskItem ); + break; + } + } while( pxTaskItem != pxLastTaskItem ); + } + else { - configASSERT( uxTopPriority ); - --uxTopPriority; + if( xDecrementTopPriority != pdFALSE ) + { + uxTopReadyPriority--; + } } - /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of - * the same priority get an equal share of the processor time. */ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCBs[ portGET_CORE_ID() ], &( pxReadyTasksLists[ uxTopPriority ] ) ); - uxTopReadyPriority = uxTopPriority; + /* This function can get called by vTaskSuspend() before the scheduler is started. + * In that case, since the idle tasks have not yet been created it is possible that we + * won't find a new task to schedule. Return pdFALSE in this case. */ + if( ( xSchedulerRunning == pdFALSE ) && ( uxCurrentPriority == tskIDLE_PRIORITY ) && ( xTaskScheduled == pdFALSE ) ) + { + break; + } - return pdTRUE; + configASSERT( ( uxCurrentPriority > tskIDLE_PRIORITY ) || ( xTaskScheduled == pdTRUE ) ); + uxCurrentPriority--; } - #endif /* ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */ -#endif /* ( configNUM_CORES == 1 ) */ + + configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ); + + return xTaskScheduled; + } + #endif /* ( configNUM_CORES == 1 ) */ +} /*-----------------------------------------------------------*/ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) From 893fdbe98c5e9287e8ecdcdd761dcf708291d4da Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 14 Jul 2022 10:07:51 +0800 Subject: [PATCH 032/164] Merge prvCheckTasksWaitingTermination from SMP branch --- tasks.c | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/tasks.c b/tasks.c index b4aa4b3559a..84f2e47efb9 100644 --- a/tasks.c +++ b/tasks.c @@ -4572,14 +4572,31 @@ static void prvCheckTasksWaitingTermination( void ) { taskENTER_CRITICAL(); { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - --uxCurrentNumberOfTasks; - --uxDeletedTasksWaitingCleanUp; + /* For SMP, multiple idles can be running simultaneously + * and we need to check that other idles did not cleanup while we were + * waiting to enter the critical section. */ + if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + if( ( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) && ( taskTASK_IS_YIELDING( pxTCB ) == pdFALSE ) ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + prvDeleteTCB( pxTCB ); + } + else + { + /* The TCB to be deleted still has not yet been switched out + * by the scheduler, so we will just exit this loop early and + * try again next time. */ + taskEXIT_CRITICAL(); + break; + } + } } taskEXIT_CRITICAL(); - - prvDeleteTCB( pxTCB ); } } #endif /* INCLUDE_vTaskDelete */ From 39ec7cec87ab2cdea68d0d11eff77ae20cd7480d Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 1 Aug 2022 12:53:53 +0800 Subject: [PATCH 033/164] Move prvDeleteTCB outside of critical section --- tasks.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 84f2e47efb9..0a8a928320a 100644 --- a/tasks.c +++ b/tasks.c @@ -4584,7 +4584,6 @@ static void prvCheckTasksWaitingTermination( void ) ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); --uxCurrentNumberOfTasks; --uxDeletedTasksWaitingCleanUp; - prvDeleteTCB( pxTCB ); } else { @@ -4597,6 +4596,8 @@ static void prvCheckTasksWaitingTermination( void ) } } taskEXIT_CRITICAL(); + + prvDeleteTCB( pxTCB ); } } #endif /* INCLUDE_vTaskDelete */ From 7dc3d5477c49c7060861fafe1ac28f289ff04905 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 2 Aug 2022 19:34:08 +0800 Subject: [PATCH 034/164] Add NULL pointer check in prvCheckTasksWaitingTermination --- tasks.c | 169 +++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 107 insertions(+), 62 deletions(-) diff --git a/tasks.c b/tasks.c index 0a8a928320a..bfdbaf4a153 100644 --- a/tasks.c +++ b/tasks.c @@ -474,7 +474,7 @@ static void prvYieldCore( BaseType_t xCoreID ); /* * Yields a core, or cores if multiple priorities are not allowed to run * simultaneously, to allow the task pxTCB to run. Negative value is returned if - * yeilding for task is not required. Otherwise, core ID is returned. + * yeilding for the task is not required. Otherwise, core ID is returned. */ static BaseType_t prvYieldForTask( TCB_t * pxTCB, const BaseType_t xPreemptEqualPriority, @@ -727,18 +727,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif /*-----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) - static void prvYieldCore( BaseType_t xCoreID ) +static void prvYieldCore( BaseType_t xCoreID ) +{ + #if ( configNUM_CORES == 1 ) { configASSERT( xCoreID == 0 ); portYIELD_WITHIN_API(); } -#else - static void prvYieldCore( BaseType_t xCoreID ) + #else { - /* This must be called from a critical section and - * xCoreID must be valid. */ - + /* This must be called from a critical section and xCoreID must be valid. 
*/ if( portCHECK_IF_IN_ISR() && ( xCoreID == portGET_CORE_ID() ) ) { xYieldPendings[ xCoreID ] = pdTRUE; @@ -759,13 +757,15 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } } -#endif + #endif +} /*-----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) - static BaseType_t prvYieldForTask( TCB_t * pxTCB, - const BaseType_t xPreemptEqualPriority, - BaseType_t xYieldForTask ) +static BaseType_t prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority, + BaseType_t xYieldForTask ) +{ + #if ( configNUM_CORES == 1 ) { BaseType_t xLowestPriorityCore = ( ( BaseType_t ) -1 ); /* Negative value to indicate no yielding required. */ @@ -788,10 +788,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; return xLowestPriorityCore; } -#else - static BaseType_t prvYieldForTask( TCB_t * pxTCB, - const BaseType_t xPreemptEqualPriority, - BaseType_t xYieldForTask ) + #else { BaseType_t xLowestPriority; BaseType_t xTaskPriority; @@ -848,7 +845,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; return xLowestPriorityCore; } -#endif /* ( configNUM_CORES == 1 ) */ + #endif /* ( configNUM_CORES == 1 ) */ +} /*-----------------------------------------------------------*/ @@ -1554,9 +1552,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) pxTCB = prvGetTCBFromHandle( xTaskToDelete ); #if ( configNUM_CORES == 1 ) + { xTaskRunningOnCore = ( TaskRunning_t ) 0; + } #else + { xTaskRunningOnCore = pxTCB->xTaskRunState; + } #endif /* Remove task from the ready/delayed list. */ @@ -1639,23 +1641,24 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) ) ) { #if ( configNUM_CORES == 1 ) + { configASSERT( uxSchedulerSuspended == 0 ); portYIELD_WITHIN_API(); + } #else + { + if( xTaskRunningOnCore == portGET_CORE_ID() ) { - if( xTaskRunningOnCore == portGET_CORE_ID() ) - { - configASSERT( uxSchedulerSuspended == 0 ); - vTaskYieldWithinAPI(); - } - else - { - prvYieldCore( xTaskRunningOnCore ); - } + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( xTaskRunningOnCore ); } + } #endif } - #if ( configNUM_CORES > 1 ) taskEXIT_CRITICAL(); #endif @@ -2344,7 +2347,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* Check if Yield is required for this Task in prvYieldForTask. */ #if ( configUSE_PREEMPTION == 1 ) + { ( void ) prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); + } #endif } else @@ -2417,7 +2422,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } else { - xYieldRequired = pdFALSE; + mtCOVERAGE_TEST_MARKER(); } ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); @@ -2682,8 +2687,9 @@ void vTaskEndScheduler( void ) } /*----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) - void vTaskSuspendAll( void ) +void vTaskSuspendAll( void ) +{ + #if ( configNUM_CORES == 1 ) { /* A critical section is not required as the variable is of type * BaseType_t. Please read Richard Barry's reply in the following link to a @@ -2702,8 +2708,7 @@ void vTaskEndScheduler( void ) * the above increment elsewhere. 
*/ portMEMORY_BARRIER(); } -#else - void vTaskSuspendAll( void ) + #else /* ( configNUM_CORES == 1 ) */ { UBaseType_t ulState; @@ -2731,9 +2736,20 @@ void vTaskEndScheduler( void ) ++uxSchedulerSuspended; portRELEASE_ISR_LOCK(); - if( ( uxSchedulerSuspended == 1U ) && ( pxCurrentTCB->uxCriticalNesting == 0U ) ) + if( uxSchedulerSuspended == 1U ) + { + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + prvCheckForRunStateChange(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else { - prvCheckForRunStateChange(); + mtCOVERAGE_TEST_MARKER(); } portCLEAR_INTERRUPT_MASK( ulState ); @@ -2743,7 +2759,8 @@ void vTaskEndScheduler( void ) mtCOVERAGE_TEST_MARKER(); } } -#endif /* ( configNUM_CORES == 1 ) */ + #endif /* ( configNUM_CORES == 1 ) */ +} /*----------------------------------------------------------*/ @@ -2816,6 +2833,8 @@ BaseType_t xTaskResumeAll( void ) BaseType_t xAlreadyYielded = pdFALSE; #if ( configNUM_CORES > 1 ) + /* Scheduler running status is not checked in vTaskSuspendAll in single + * core implementation. This condition is only required for multiple cores. */ if( xSchedulerRunning != pdFALSE ) #endif { @@ -2852,6 +2871,7 @@ BaseType_t xTaskResumeAll( void ) prvAddTaskToReadyList( pxTCB ); #if ( configNUM_CORES == 1 ) + { /* If the moved task has a priority higher than the current * task then a yield must be performed. */ if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) @@ -2862,6 +2882,7 @@ BaseType_t xTaskResumeAll( void ) { mtCOVERAGE_TEST_MARKER(); } + } #else /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. * If the current core yielded then vTaskSwitchContext() has already been called @@ -2927,7 +2948,9 @@ BaseType_t xTaskResumeAll( void ) #endif #if ( configNUM_CORES == 1 ) + { taskYIELD_IF_USING_PREEMPTION(); + } #endif /* ( configNUM_CORES == 1 ) */ } else @@ -3511,6 +3534,7 @@ BaseType_t xTaskIncrementTick( void ) #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { #if ( configNUM_CORES == 1 ) + { if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { xSwitchRequired = pdTRUE; @@ -3519,7 +3543,9 @@ BaseType_t xTaskIncrementTick( void ) { mtCOVERAGE_TEST_MARKER(); } + } #else + { /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not * force a context switch that would just shuffle tasks around cores */ /* TODO: There are certainly better ways of doing this that would reduce @@ -3536,6 +3562,7 @@ BaseType_t xTaskIncrementTick( void ) mtCOVERAGE_TEST_MARKER(); } } + } #endif /* ( configNUM_CORES == 1 ) */ } #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ @@ -3572,44 +3599,46 @@ BaseType_t xTaskIncrementTick( void ) #endif /* configUSE_PREEMPTION */ #if ( configUSE_PREEMPTION == 1 ) + { #if ( configNUM_CORES == 1 ) + { + /* For single core the core ID is always 0. */ + if( xCoreYieldList[ 0 ] != pdFALSE ) { - /* For single core the core ID is always 0. 
*/ - if( xCoreYieldList[ 0 ] != pdFALSE ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + xSwitchRequired = pdTRUE; } - #else + else { - BaseType_t xCoreID; + mtCOVERAGE_TEST_MARKER(); + } + } + #else + { + BaseType_t xCoreID; - xCoreID = portGET_CORE_ID(); + xCoreID = portGET_CORE_ID(); - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + if( xCoreYieldList[ x ] != pdFALSE ) { - if( xCoreYieldList[ x ] != pdFALSE ) + if( x == ( UBaseType_t ) xCoreID ) { - if( x == ( UBaseType_t ) xCoreID ) - { - xSwitchRequired = pdTRUE; - } - else - { - prvYieldCore( x ); - } + xSwitchRequired = pdTRUE; } else { - mtCOVERAGE_TEST_MARKER(); + prvYieldCore( x ); } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } + } #endif /* ( configNUM_CORES == 1 ) */ + } #endif /* configUSE_PREEMPTION */ } else @@ -3999,7 +4028,9 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) /* Mark that a yield is pending in case the user is not using the * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ #if ( configUSE_PREEMPTION == 1 ) + { xYieldPendings[ xYieldCoreID ] = pdTRUE; + } #endif } else @@ -4051,6 +4082,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, prvAddTaskToReadyList( pxUnblockedTCB ); #if ( configUSE_PREEMPTION == 1 ) + { taskENTER_CRITICAL(); { xYieldCoreID = prvYieldForTask( pxUnblockedTCB, pdFALSE, pdFALSE ); @@ -4060,6 +4092,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, } } taskEXIT_CRITICAL(); + } #endif /* ( configUSE_PREEMPTION == 1 ) */ } /*-----------------------------------------------------------*/ @@ -4564,7 +4597,7 @@ static void prvCheckTasksWaitingTermination( void ) #if ( INCLUDE_vTaskDelete == 1 ) { - TCB_t * pxTCB; + TCB_t * pxTCB = NULL; /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL() * being called too often in the idle task. */ @@ -4597,7 +4630,11 @@ static void prvCheckTasksWaitingTermination( void ) } taskEXIT_CRITICAL(); - prvDeleteTCB( pxTCB ); + if( pxTCB != NULL ) + { + prvDeleteTCB( pxTCB ); + pxTCB = NULL; + } } } #endif /* INCLUDE_vTaskDelete */ @@ -4930,7 +4967,9 @@ static void prvResetNextTaskUnblockTime( void ) UBaseType_t uxSavedInterruptStatus = 0; uxSavedInterruptStatus = portSET_INTERRUPT_MASK(); - xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; + { + xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; + } portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); return xReturn; @@ -6020,7 +6059,9 @@ TickType_t uxTaskResetEventItemValue( void ) /* The notified task has a priority above the currently * executing task so a yield is required. */ #if ( configUSE_PREEMPTION == 1 ) + { ( void ) prvYieldForTask( pxTCB, pdFALSE, pdTRUE ); + } #endif /* ( configUSE_PREEMPTION == 1 ) */ } else @@ -6160,7 +6201,9 @@ TickType_t uxTaskResetEventItemValue( void ) * using the "xHigherPriorityTaskWoken" parameter to an ISR * safe FreeRTOS function. */ #if ( configUSE_PREEMPTION == 1 ) + { xYieldPendings[ xYieldCoreId ] = pdTRUE; + } #endif /* ( configUSE_PREEMPTION == 1 ) */ } else @@ -6253,7 +6296,9 @@ TickType_t uxTaskResetEventItemValue( void ) * using the "xHigherPriorityTaskWoken" parameter in an ISR * safe FreeRTOS function. 
*/ #if ( configUSE_PREEMPTION == 1 ) + { xYieldPendings[ xYieldCoreId ] = pdTRUE; + } #endif /* ( configUSE_PREEMPTION == 1 ) */ } else From fbbc5975b69ff5fa62913de7e5bff576461194c3 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 20 Sep 2022 11:55:05 +0800 Subject: [PATCH 035/164] Update for performance * Remove prvSelectHighestPriorityTask and vTaskSwitchContextForCore for -O0 performance in single core --- tasks.c | 75 +++++++++++++++++++++++++++------------------------------ 1 file changed, 35 insertions(+), 40 deletions(-) diff --git a/tasks.c b/tasks.c index bfdbaf4a153..b13147e4aa1 100644 --- a/tasks.c +++ b/tasks.c @@ -850,29 +850,11 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, /*-----------------------------------------------------------*/ -static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) -{ - #if ( configNUM_CORES == 1 ) - { - BaseType_t xReturn = pdTRUE; - - /* xCoreID should always be 0 in single core. */ - configASSERT( xCoreID == 0 ); - - /* This function must be called after scheduler started. */ - configASSERT( xSchedulerRunning == pdTRUE ); - - taskSELECT_HIGHEST_PRIORITY_TASK(); - - return xReturn; - } - #else - { - UBaseType_t uxCurrentPriority = uxTopReadyPriority; - BaseType_t xTaskScheduled = pdFALSE; - BaseType_t xDecrementTopPriority = pdTRUE; - - while( xTaskScheduled == pdFALSE ) +#if ( configNUM_CORES > 1 ) + #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + /* SMP_TODO : This is a temporay implementation for compilation. + * Update this function in another commit. */ + static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) { if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) { @@ -953,13 +935,8 @@ static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) configASSERT( ( uxCurrentPriority > tskIDLE_PRIORITY ) || ( xTaskScheduled == pdTRUE ) ); uxCurrentPriority--; } - - configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ); - - return xTaskScheduled; - } - #endif /* ( configNUM_CORES == 1 ) */ -} + #endif /* ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */ +#endif /* ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) @@ -3774,7 +3751,11 @@ BaseType_t xTaskIncrementTick( void ) #endif /* configUSE_APPLICATION_TASK_TAG */ /*-----------------------------------------------------------*/ -void vTaskSwitchContextForCore( BaseType_t xCoreID ) +#if ( configNUM_CORES == 1 ) + void vTaskSwitchContext( void ) +#else + void vTaskSwitchContextForCore( BaseType_t xCoreID ) +#endif { /* Acquire both locks: * - The ISR lock protects the ready list from simultaneous access by @@ -3797,11 +3778,19 @@ void vTaskSwitchContextForCore( BaseType_t xCoreID ) { /* The scheduler is currently suspended - do not allow a context * switch. */ - xYieldPendings[ xCoreID ] = pdTRUE; + #if ( configNUM_CORES == 1 ) + xYieldPendings[ 0 ] = pdTRUE; + #else + xYieldPendings[ xCoreID ] = pdTRUE; + #endif } else { - xYieldPendings[ xCoreID ] = pdFALSE; + #if ( configNUM_CORES == 1 ) + xYieldPendings[ 0 ] = pdFALSE; + #else + xYieldPendings[ xCoreID ] = pdFALSE; + #endif traceTASK_SWITCHED_OUT(); #if ( configGENERATE_RUN_TIME_STATS == 1 ) @@ -3844,7 +3833,11 @@ void vTaskSwitchContextForCore( BaseType_t xCoreID ) /* Select a new task to run using either the generic C or port * optimised asm code. 
*/ - ( void ) prvSelectHighestPriorityTask( xCoreID ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + #if ( configNUM_CORES == 1 ) + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + #else + ( void ) prvSelectHighestPriorityTask( xCoreID ); + #endif traceTASK_SWITCHED_IN(); /* After the new task is switched in, update the global errno. */ @@ -3870,14 +3863,16 @@ void vTaskSwitchContextForCore( BaseType_t xCoreID ) } /*-----------------------------------------------------------*/ -void vTaskSwitchContext( void ) -{ - BaseType_t xCoreID; +#if ( configNUM_CORES > 1 ) + void vTaskSwitchContext( void ) + { + BaseType_t xCoreID; - xCoreID = portGET_CORE_ID(); + xCoreID = portGET_CORE_ID(); - vTaskSwitchContextForCore( xCoreID ); -} + vTaskSwitchContextForCore( xCoreID ); + } +#endif /*-----------------------------------------------------------*/ From 2c5b814d6b9c507f887ce86d0df2ea9b1440ba34 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 20 Sep 2022 14:14:36 +0800 Subject: [PATCH 036/164] Update prvSelectHighestPriorityTask --- tasks.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/tasks.c b/tasks.c index b13147e4aa1..73016b5971d 100644 --- a/tasks.c +++ b/tasks.c @@ -851,10 +851,14 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, /*-----------------------------------------------------------*/ #if ( configNUM_CORES > 1 ) - #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) - /* SMP_TODO : This is a temporay implementation for compilation. - * Update this function in another commit. 
*/ - static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) + + static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) + { + UBaseType_t uxCurrentPriority = uxTopReadyPriority; + BaseType_t xTaskScheduled = pdFALSE; + BaseType_t xDecrementTopPriority = pdTRUE; + + while( xTaskScheduled == pdFALSE ) { if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) { @@ -935,8 +939,14 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, configASSERT( ( uxCurrentPriority > tskIDLE_PRIORITY ) || ( xTaskScheduled == pdTRUE ) ); uxCurrentPriority--; } - #endif /* ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */ + + configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ); + + return xTaskScheduled; + } + #endif /* ( configNUM_CORES > 1 ) */ + /*-----------------------------------------------------------*/ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) From ac273089ba246aeed9cd0906674e2f6dfadaffca Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 14 Jul 2022 11:55:31 +0800 Subject: [PATCH 037/164] Merge vTaskYieldWithinAPI from SMP Update vTaskYieldWithinAPI from SMP * xTaskDelayUntil * xTaskDelay * ulTaskGenericNotifyTake * xTaskGenericNotifyWait * event_groups.c * queue.c * timers.c Add critical section protection * xTaskGetSchedulerState Update state check macro * vTaskGetInfo * eTaskGetState --- event_groups.c | 4 ++-- queue.c | 10 +++++----- tasks.c | 41 ++++++++++++++++++++++++++--------------- timers.c | 2 +- 4 files changed, 34 insertions(+), 23 deletions(-) diff --git a/event_groups.c b/event_groups.c index 27390e6794a..0ecfefa77ab 100644 --- a/event_groups.c +++ b/event_groups.c @@ -258,7 +258,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -410,7 +410,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { diff --git a/queue.c b/queue.c index c7e759ce17e..40b27849b02 100644 --- a/queue.c +++ b/queue.c @@ -89,7 +89,7 @@ typedef struct SemaphoreData * performed just because a higher priority task has been woken. */ #define queueYIELD_IF_USING_PREEMPTION() #else - #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #define queueYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() #endif /* @@ -1021,7 +1021,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * is also a higher priority task in the pending ready list. */ if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } } else @@ -1482,7 +1482,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -1674,7 +1674,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -1852,7 +1852,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { diff --git a/tasks.c b/tasks.c index 73016b5971d..80f968169a0 100644 --- a/tasks.c +++ b/tasks.c @@ -64,7 +64,7 @@ * performed just because a higher priority task has been woken. 
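 * (With configUSE_PREEMPTION set to 0 the macro below is therefore left
 * empty; with preemption enabled it maps to portYIELD_WITHIN_API() on a
 * single core build and to vTaskYieldWithinAPI() on an SMP build, which
 * defers the yield until the caller has left any critical section it is
 * holding.)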
*/ #define taskYIELD_IF_USING_PREEMPTION() #else - #if configNUM_CORES == 1 + #if ( configNUM_CORES == 1 ) #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() #else #define taskYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() @@ -1664,10 +1664,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxPreviousWakeTime ); configASSERT( ( xTimeIncrement > 0U ) ); - configASSERT( uxSchedulerSuspended == 0 ); vTaskSuspendAll(); { + configASSERT( uxSchedulerSuspended == 1 ); + /* Minor optimisation. The tick count cannot change in this * block. */ const TickType_t xConstTickCount = xTickCount; @@ -1728,7 +1729,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * have put ourselves to sleep. */ if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -1775,7 +1776,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * have put ourselves to sleep. */ if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -1796,7 +1797,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxTCB ); - if( pxTCB == pxCurrentTCB ) + if( taskTASK_IS_RUNNING( pxTCB ) ) { /* The task calling this function is querying its own state. */ eReturn = eRunning; @@ -4693,7 +4694,7 @@ static void prvCheckTasksWaitingTermination( void ) * state is just set to whatever is passed in. */ if( eState != eInvalid ) { - if( pxTCB == pxCurrentTCB ) + if( taskTASK_IS_RUNNING( pxTCB ) ) { pxTaskStatus->eCurrentState = eRunning; } @@ -5008,14 +5009,18 @@ static void prvResetNextTaskUnblockTime( void ) } else { - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) - { - xReturn = taskSCHEDULER_RUNNING; - } - else + taskENTER_CRITICAL(); { - xReturn = taskSCHEDULER_SUSPENDED; + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } } + taskEXIT_CRITICAL(); } return xReturn; @@ -5318,7 +5323,13 @@ static void prvResetNextTaskUnblockTime( void ) xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } } - +#else + #if ( configNUM_CORES == 1 ) + void vTaskYieldWithinAPI( void ) + { + portYIELD_WITHIN_API(); + } + #endif #endif /* portCRITICAL_NESTING_IN_TCB */ /*-----------------------------------------------------------*/ @@ -5831,7 +5842,7 @@ TickType_t uxTaskResetEventItemValue( void ) * section (some will yield immediately, others wait until the * critical section exits) - but it is not something that * application code should ever do. */ - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -5910,7 +5921,7 @@ TickType_t uxTaskResetEventItemValue( void ) * section (some will yield immediately, others wait until the * critical section exits) - but it is not something that * application code should ever do. */ - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { diff --git a/timers.c b/timers.c index 1019f7de9d8..7804e11c333 100644 --- a/timers.c +++ b/timers.c @@ -647,7 +647,7 @@ * block time to expire. If a command arrived between the * critical section being exited and this yield then the yield * will not cause the task to block. 
*/ - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { From 35c589ef3f1c9297e6a9a3ecc7414dd2acd6aacb Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Sat, 16 Jul 2022 12:03:43 +0800 Subject: [PATCH 038/164] Merge vTaskPrioritySet from SMP branch --- tasks.c | 83 ++++++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 39 deletions(-) diff --git a/tasks.c b/tasks.c index 80f968169a0..4c8cb8123d7 100644 --- a/tasks.c +++ b/tasks.c @@ -1954,6 +1954,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) TCB_t * pxTCB; UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; BaseType_t xYieldRequired = pdFALSE; + BaseType_t xYieldForTask = pdFALSE; + BaseType_t xCoreID; configASSERT( uxNewPriority < configMAX_PRIORITIES ); @@ -1988,22 +1990,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( uxCurrentBasePriority != uxNewPriority ) { /* The priority change may have readied a task of higher - * priority than the calling task. */ + * priority than a running task. */ if( uxNewPriority > uxCurrentBasePriority ) { - if( pxTCB != pxCurrentTCB ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) { - /* The priority of a task other than the currently - * running task is being raised. Is the priority being - * raised above that of the running task? */ - if( uxNewPriority >= pxCurrentTCB->uxPriority ) - { - xYieldRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* The priority of a task is being raised so + * perform a yield for this task later. */ + xYieldForTask = pdTRUE; } else { @@ -2012,9 +2006,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * priority task able to run so no yield is required. */ } } - else if( pxTCB == pxCurrentTCB ) + else if( taskTASK_IS_RUNNING( pxTCB ) ) { - /* Setting the priority of the running task down means + /* Setting the priority of a running task down means * there may now be another task of higher priority that * is ready to execute. */ xYieldRequired = pdTRUE; @@ -2089,17 +2083,31 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } else { - mtCOVERAGE_TEST_MARKER(); + /* It's possible that xYieldForTask was already set to pdTRUE because + * its priority is being raised. However, since it is not in a ready list + * we don't actually need to yield for it. */ + xYieldForTask = pdFALSE; } - if( xYieldRequired != pdFALSE ) - { - taskYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( configUSE_PREEMPTION == 1 ) + if( xYieldRequired != pdFALSE ) + { + #if ( configNUM_CORES == 1 ) + xCoreID = ( BaseType_t ) 0; + #else + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + #endif + prvYieldCore( xCoreID ); + } + else if( xYieldForTask != pdFALSE ) + { + prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* Remove compiler warning about unused variables when the port * optimised task selection is not being used. */ @@ -5305,13 +5313,16 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* configUSE_MUTEXES */ /*-----------------------------------------------------------*/ -#if ( portCRITICAL_NESTING_IN_TCB == 1 ) - -/* - * If not in a critical section then yield immediately. - * Otherwise set xYieldPendings to true to wait to - * yield until exiting the critical section. - */ +#if ( configNUM_CORES == 1 ) + void vTaskYieldWithinAPI( void ) + { + portYIELD_WITHIN_API(); + } +#else + /*If not in a critical section then yield immediately. 
+ * Otherwise set xYieldPendings to true to wait to + * yield until exiting the critical section. + */ void vTaskYieldWithinAPI( void ) { if( pxCurrentTCB->uxCriticalNesting == 0U ) @@ -5323,14 +5334,8 @@ static void prvResetNextTaskUnblockTime( void ) xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } } -#else - #if ( configNUM_CORES == 1 ) - void vTaskYieldWithinAPI( void ) - { - portYIELD_WITHIN_API(); - } - #endif -#endif /* portCRITICAL_NESTING_IN_TCB */ +#endif + /*-----------------------------------------------------------*/ #if ( portCRITICAL_NESTING_IN_TCB == 1 ) From 453487cac23ebcca12faaacabaa85610da8f457c Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 3 Aug 2022 15:32:40 +0800 Subject: [PATCH 039/164] Void prvYieldForTask return value in vTaskPrioritySet --- tasks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 4c8cb8123d7..78e4d31f9da 100644 --- a/tasks.c +++ b/tasks.c @@ -2101,7 +2101,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } else if( xYieldForTask != pdFALSE ) { - prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); + ( void ) prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); } else { From fdaf43553bbbed301cfe681445aee1afe076d40e Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 27 Jul 2022 19:21:37 +0800 Subject: [PATCH 040/164] Yield for SMP when set priority --- tasks.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tasks.c b/tasks.c index 78e4d31f9da..5985f1911dd 100644 --- a/tasks.c +++ b/tasks.c @@ -1993,18 +1993,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * priority than a running task. */ if( uxNewPriority > uxCurrentBasePriority ) { - if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) + #if ( configNUM_CORES == 1 ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) + #endif { /* The priority of a task is being raised so * perform a yield for this task later. */ xYieldForTask = pdTRUE; } - else - { - /* The priority of the running task is being raised, - * but the running task must already be the highest - * priority task able to run so no yield is required. */ - } } else if( taskTASK_IS_RUNNING( pxTCB ) ) { From 411d55585fa820e6ae3dd14307523141f05822b0 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 26 Jul 2022 10:57:52 +0800 Subject: [PATCH 041/164] Move vTaskDelay check uxSchedulerSuspended --- tasks.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 5985f1911dd..90a30aed743 100644 --- a/tasks.c +++ b/tasks.c @@ -1751,9 +1751,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* A delay time of zero just forces a reschedule. */ if( xTicksToDelay > ( TickType_t ) 0U ) { - configASSERT( uxSchedulerSuspended == 0 ); vTaskSuspendAll(); { + /* Move the assert inside since there can be multiple cores running. 
*/ + configASSERT( uxSchedulerSuspended == 1 ); + traceTASK_DELAY(); /* A task that is removed from the event list while the @@ -2086,12 +2088,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } #if ( configUSE_PREEMPTION == 1 ) + { if( xYieldRequired != pdFALSE ) { #if ( configNUM_CORES == 1 ) + { xCoreID = ( BaseType_t ) 0; + } #else + { xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + } #endif prvYieldCore( xCoreID ); } @@ -2103,6 +2110,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { mtCOVERAGE_TEST_MARKER(); } + } #endif /* Remove compiler warning about unused variables when the port From b5458776387975e64be0ee11667d53d202b27633 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 21 Sep 2022 11:50:26 +0800 Subject: [PATCH 042/164] Update code logic for performance --- tasks.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/tasks.c b/tasks.c index 90a30aed743..1216a7430a7 100644 --- a/tasks.c +++ b/tasks.c @@ -1957,7 +1957,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; BaseType_t xYieldRequired = pdFALSE; BaseType_t xYieldForTask = pdFALSE; - BaseType_t xCoreID; configASSERT( uxNewPriority < configMAX_PRIORITIES ); @@ -2089,27 +2088,36 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #if ( configUSE_PREEMPTION == 1 ) { - if( xYieldRequired != pdFALSE ) + #if ( configNUM_CORES == 1 ) { - #if ( configNUM_CORES == 1 ) + /* For single core, yield for task behaves the same as yield current core. */ + if( xYieldForTask != pdFALSE ) { - xCoreID = ( BaseType_t ) 0; + xYieldRequired = pdTRUE; } - #else + + if( xYieldRequired != pdFALSE ) { - xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + taskYIELD_IF_USING_PREEMPTION(); } - #endif - prvYieldCore( xCoreID ); - } - else if( xYieldForTask != pdFALSE ) - { - ( void ) prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); } - else + #else { - mtCOVERAGE_TEST_MARKER(); + if( xYieldRequired != pdFALSE ) + { + prvYieldCore( ( BaseType_t ) pxTCB->xTaskRunState ); + } + else if( xYieldForTask != pdFALSE ) + { + ( void ) prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } + #endif + } #endif From 120c2d84020f36aa6deaa860e528a0c027a2eecb Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 21 Sep 2022 15:59:52 +0800 Subject: [PATCH 043/164] Fix yield for task in single core --- tasks.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/tasks.c b/tasks.c index 1216a7430a7..2dedf5618b6 100644 --- a/tasks.c +++ b/tasks.c @@ -2088,36 +2088,37 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #if ( configUSE_PREEMPTION == 1 ) { - #if ( configNUM_CORES == 1 ) + if( xYieldRequired != pdFALSE ) { - /* For single core, yield for task behaves the same as yield current core. 
*/ - if( xYieldForTask != pdFALSE ) + #if ( configNUM_CORES == 1 ) { - xYieldRequired = pdTRUE; + taskYIELD_IF_USING_PREEMPTION(); } - - if( xYieldRequired != pdFALSE ) + #else { - taskYIELD_IF_USING_PREEMPTION(); + prvYieldCore( ( BaseType_t ) pxTCB->xTaskRunState ); } + #endif } - #else + else if( xYieldForTask != pdFALSE ) { - if( xYieldRequired != pdFALSE ) + #if ( configNUM_CORES == 1 ) { - prvYieldCore( ( BaseType_t ) pxTCB->xTaskRunState ); + if( uxNewPriority >= pxCurrentTCB->uxPriority ) + { + taskYIELD_IF_USING_PREEMPTION(); + } } - else if( xYieldForTask != pdFALSE ) + #else { ( void ) prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); } - #endif - } #endif From 150d6ced0bdd23e2e62d9518b294e61873860a56 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 18 Jul 2022 11:42:19 +0800 Subject: [PATCH 044/164] Merge timer change from SMP branch * Split xTimerGenericCommand into xTimerGenericCommandFromTask and xTimerGenericCommandFromISR to remove the recursion path when called from ISRs. * Add portTIMER_CALLBACK_ATTRIBUTE for timer callback function --- include/FreeRTOS.h | 4 ++ include/timers.h | 26 +++++++-- timers.c | 129 +++++++++++++++++++++++++++++---------------- 3 files changed, 110 insertions(+), 49 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 0ff9e1d67fe..ca511693714 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -394,6 +394,10 @@ #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_STACK_DEPTH must also be defined. #endif /* configTIMER_TASK_STACK_DEPTH */ + #ifndef portTIMER_CALLBACK_ATTRIBUTE + #define portTIMER_CALLBACK_ATTRIBUTE + #endif /* portTIMER_CALLBACK_ATTRIBUTE */ + #endif /* configUSE_TIMERS */ #ifndef portSET_INTERRUPT_MASK_FROM_ISR diff --git a/include/timers.h b/include/timers.h index d255c3986e7..fba7b76dc4b 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1328,12 +1328,28 @@ TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; * for use by the kernel only. */ BaseType_t xTimerCreateTimerTask( void ) PRIVILEGED_FUNCTION; -BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, - const BaseType_t xCommandID, - const TickType_t xOptionalValue, - BaseType_t * const pxHigherPriorityTaskWoken, - const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +/* + * Splitting the xTimerGenericCommand into two sub functions and making it a macro + * removes a recursion path when called from ISRs. This is primarily for the XCore + * XCC port which detects the recursion path and throws an error during compilation + * when this is not split. + */ +BaseType_t xTimerGenericCommandFromTask( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +BaseType_t xTimerGenericCommandFromISR( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +#define xTimerGenericCommand( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ) \ + ( ( xCommandID ) < tmrFIRST_FROM_ISR_COMMAND ? 
\ + xTimerGenericCommandFromTask( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ) : \ + xTimerGenericCommandFromISR( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ) ) #if ( configUSE_TRACE_FACILITY == 1 ) void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) PRIVILEGED_FUNCTION; diff --git a/timers.c b/timers.c index 7804e11c333..e0ac7a98fcd 100644 --- a/timers.c +++ b/timers.c @@ -78,7 +78,7 @@ ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. */ TickType_t xTimerPeriodInTicks; /*<< How quickly and often the timer expires. */ void * pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ - TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + portTIMER_CALLBACK_ATTRIBUTE TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ #endif @@ -103,6 +103,7 @@ typedef struct tmrCallbackParameters { + portTIMER_CALLBACK_ATTRIBUTE PendedFunction_t pxCallbackFunction; /* << The callback function to execute. */ void * pvParameter1; /* << The value that will be used as the callback functions first parameter. */ uint32_t ulParameter2; /* << The value that will be used as the callback functions second parameter. */ @@ -382,52 +383,92 @@ traceTIMER_CREATE( pxNewTimer ); } /*-----------------------------------------------------------*/ - - BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, - const BaseType_t xCommandID, - const TickType_t xOptionalValue, - BaseType_t * const pxHigherPriorityTaskWoken, - const TickType_t xTicksToWait ) - { - BaseType_t xReturn = pdFAIL; + + BaseType_t xTimerGenericCommandFromTask( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) + { + BaseType_t xReturn = pdFAIL; DaemonTaskMessage_t xMessage; - configASSERT( xTimer ); - - /* Send a message to the timer service task to perform a particular action - * on a particular timer definition. */ - if( xTimerQueue != NULL ) - { - /* Send a command to the timer service task to start the xTimer timer. */ - xMessage.xMessageID = xCommandID; - xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; - xMessage.u.xTimerParameters.pxTimer = xTimer; - - if( xCommandID < tmrFIRST_FROM_ISR_COMMAND ) - { - if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING ) - { - xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); - } - else - { - xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY ); - } - } - else - { - xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); - } - - traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + ( void ) pxHigherPriorityTaskWoken; + + configASSERT( xTimer ); + + /* Send a message to the timer service task to perform a particular action + * on a particular timer definition. */ + if( xTimerQueue != NULL ) + { + /* Send a command to the timer service task to start the xTimer timer. 
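 * Task level wrappers such as xTimerStart() and xTimerStop() reach this
 * function through the xTimerGenericCommand() macro because their command
 * IDs are below tmrFIRST_FROM_ISR_COMMAND; the ...FromISR() wrappers are
 * routed to xTimerGenericCommandFromISR() instead. A minimal sketch,
 * assuming xMyTimer is a hypothetical handle returned by xTimerCreate():
 *
 * void vKickTimerFromTask( void )
 * {
 *     // Queues tmrCOMMAND_START for the daemon task, blocking for at
 *     // most 10 ms if the timer command queue is currently full.
 *     if( xTimerStart( xMyTimer, pdMS_TO_TICKS( 10 ) ) != pdPASS )
 *     {
 *         // The start command could not be queued.
 *     }
 * }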
*/ + xMessage.xMessageID = xCommandID; + xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; + xMessage.u.xTimerParameters.pxTimer = xTimer; + + configASSERT( xCommandID < tmrFIRST_FROM_ISR_COMMAND ); + + if( xCommandID < tmrFIRST_FROM_ISR_COMMAND ) + { + if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING ) + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + } + else + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY ); + } + } + + traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t xTimerGenericCommandFromISR( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) + { + BaseType_t xReturn = pdFAIL; + DaemonTaskMessage_t xMessage; - return xReturn; - } + ( void ) xTicksToWait; + + configASSERT( xTimer ); + + /* Send a message to the timer service task to perform a particular action + * on a particular timer definition. */ + if( xTimerQueue != NULL ) + { + /* Send a command to the timer service task to start the xTimer timer. */ + xMessage.xMessageID = xCommandID; + xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; + xMessage.u.xTimerParameters.pxTimer = xTimer; + + configASSERT( xCommandID >= tmrFIRST_FROM_ISR_COMMAND ); + + if( xCommandID >= tmrFIRST_FROM_ISR_COMMAND ) + { + xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + } + + traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } /*-----------------------------------------------------------*/ TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) From ad7644d6addc2ad5ce3975bbf40efdd7b5a50646 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 26 Jul 2022 11:31:44 +0800 Subject: [PATCH 045/164] Add RP2040 SMP porting support --- .../ThirdParty/GCC/RP2040/include/portmacro.h | 128 ++++++- .../GCC/RP2040/include/rp2040_config.h | 19 + portable/ThirdParty/GCC/RP2040/library.cmake | 6 +- portable/ThirdParty/GCC/RP2040/port.c | 358 +++++++++++++++--- 4 files changed, 443 insertions(+), 68 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index ffc93a6b419..7239e05f83d 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -35,6 +35,8 @@ #endif #include "pico.h" + #include "hardware/sync.h" + /*----------------------------------------------------------- * Port specific definitions. * @@ -104,28 +106,130 @@ #define xPortSysTickHandler isr_systick #endif - #define portCHECK_IF_IN_ISR() ({ \ - uint32_t ulIPSR; \ - __asm volatile ("mrs %0, IPSR" : "=r" (ulIPSR)::); \ - ((uint8_t)ulIPSR)>0;}) +/*-----------------------------------------------------------*/ + +/* Multi-core */ + #define portMAX_CORE_COUNT 2 + #ifndef configNUM_CORES + #define configNUM_CORES 2 + #endif + + /* Check validity of number of cores specified in config */ + #if ( configNUM_CORES < 1 || portMAX_CORE_COUNT < configNUM_CORES ) + #error "Invalid number of cores specified in config!" + #endif + + #if ( configTICK_CORE < 0 || configTICK_CORE > configNUM_CORES ) + #error "Invalid tick core specified in config!" 
+ #endif + /* FreeRTOS core id is always zero based, so always 0 if we're running on only one core */ + #if configNUM_CORES == portMAX_CORE_COUNT + #define portGET_CORE_ID() get_core_num() + #else + #define portGET_CORE_ID() 0 + #endif + #define portCHECK_IF_IN_ISR() ({ \ + uint32_t ulIPSR; \ + __asm volatile ("mrs %0, IPSR" : "=r" (ulIPSR)::); \ + ((uint8_t)ulIPSR)>0;}) + + void vYieldCore(int xCoreID); + #define portYIELD_CORE(a) vYieldCore(a) + #define portRESTORE_INTERRUPTS(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : ) + /*-----------------------------------------------------------*/ /* Critical section management. */ - extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); - extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) + + #define portSET_INTERRUPT_MASK() ({ \ + uint32_t ulState; \ + __asm volatile ("mrs %0, PRIMASK" : "=r" (ulState)::); \ + __asm volatile ( " cpsid i " ::: "memory" ); \ + ulState;}) + + #define portCLEAR_INTERRUPT_MASK(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : ) + + #if configNUM_CORES == 1 + extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); + extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) + #else + extern UBaseType_t vTaskEnterCriticalFromISR( void ); + extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); + #define portSET_INTERRUPT_MASK_FROM_ISR() vTaskEnterCriticalFromISR() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vTaskExitCriticalFromISR( x ) + #endif #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) extern void vPortEnableInterrupts(); #define portENABLE_INTERRUPTS() vPortEnableInterrupts() - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() + #if ( configNUM_CORES == 1 ) + extern void vPortEnterCritical( void ); + extern void vPortExitCritical( void ); + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() + #else + extern void vTaskEnterCritical( void ); + extern void vTaskExitCritical( void ); + #define portENTER_CRITICAL() vTaskEnterCritical() + #define portEXIT_CRITICAL() vTaskExitCritical() + #endif + + #define portRTOS_SPINLOCK_COUNT 2 + + /* Note this is a single method with uxAcquire parameter since we have + * static vars, the method is always called with a compile time constant for + * uxAcquire, and the compiler should dothe right thing! 
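 * In this port the helper is only reached through the portGET_ISR_LOCK()/
 * portRELEASE_ISR_LOCK() and portGET_TASK_LOCK()/portRELEASE_TASK_LOCK()
 * macros defined further down, so application code never calls it
 * directly. A nested acquisition by the core that already owns the lock
 * only increments the recursion count instead of spinning, roughly as in
 * this illustrative sequence:
 *
 * portGET_ISR_LOCK();     // first take - claims the SDK spin lock
 * portGET_ISR_LOCK();     // same core again - recursion count becomes 2
 * portRELEASE_ISR_LOCK(); // recursion count back to 1, lock still held
 * portRELEASE_ISR_LOCK(); // count reaches 0, spin lock released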
*/ + static inline void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) { + static uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ]; + static uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ]; + configASSERT(ulLockNum >= 0 && ulLockNum < portRTOS_SPINLOCK_COUNT ); + uint32_t ulCoreNum = get_core_num(); + uint32_t ulLockBit = 1u << ulLockNum; + configASSERT(ulLockBit < 256u ); + if( uxAcquire ) + { + if( __builtin_expect( !*pxSpinLock, 0 ) ) + { + if( ucOwnedByCore[ulCoreNum] & ulLockBit ) + { + configASSERT(ucRecursionCountByLock[ulLockNum] != 255u ); + ucRecursionCountByLock[ulLockNum]++; + return; + } + while ( __builtin_expect( !*pxSpinLock, 0 ) ); + } + __mem_fence_acquire(); + configASSERT(ucRecursionCountByLock[ulLockNum] == 0 ); + ucRecursionCountByLock[ulLockNum] = 1; + ucOwnedByCore[ulCoreNum] |= ulLockBit; + } else { + configASSERT((ucOwnedByCore[ulCoreNum] & ulLockBit) != 0 ); + configASSERT(ucRecursionCountByLock[ulLockNum] != 0 ); + if( !--ucRecursionCountByLock[ulLockNum] ) + { + ucOwnedByCore[ulCoreNum] &= ~ulLockBit; + __mem_fence_release(); + *pxSpinLock = 1; + } + } + } + + #if ( configNUM_CORES == 1 ) + #define portGET_ISR_LOCK() + #define portRELEASE_ISR_LOCK() + #define portGET_TASK_LOCK() + #define portRELEASE_TASK_LOCK() + #else + #define portGET_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), pdTRUE) + #define portRELEASE_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), pdFALSE) + #define portGET_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), pdTRUE) + #define portRELEASE_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), pdFALSE) + #endif /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h index eb731232f4f..195ea7cbd18 100644 --- a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h +++ b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h @@ -63,6 +63,25 @@ extern "C" { #endif #endif +#if ( configNUM_CORES > 1 ) + /* configTICK_CORE indicates which core should handle the SysTick + * interrupts */ + #ifndef configTICK_CORE + #define configTICK_CORE 0 + #endif +#endif + +/* This SMP port requires two spin locks, which are claimed from the SDK. 
+ * the spin lock numbers to be used are defined statically and defaulted here + * to the values nominally set aside for RTOS by the SDK */ +#ifndef configSMP_SPINLOCK_0 + #define configSMP_SPINLOCK_0 PICO_SPINLOCK_ID_OS1 +#endif + +#ifndef configSMP_SPINLOCK_1 + #define configSMP_SPINLOCK_1 PICO_SPINLOCK_ID_OS2 +#endif + #ifdef __cplusplus }; #endif diff --git a/portable/ThirdParty/GCC/RP2040/library.cmake b/portable/ThirdParty/GCC/RP2040/library.cmake index 3c6d12d15fc..a50e25be51f 100644 --- a/portable/ThirdParty/GCC/RP2040/library.cmake +++ b/portable/ThirdParty/GCC/RP2040/library.cmake @@ -27,11 +27,13 @@ target_include_directories(FreeRTOS-Kernel INTERFACE target_link_libraries(FreeRTOS-Kernel INTERFACE FreeRTOS-Kernel-Core pico_base_headers - hardware_exception) + hardware_clocks + hardware_exception + pico_multicore +) target_compile_definitions(FreeRTOS-Kernel INTERFACE LIB_FREERTOS_KERNEL=1 - FREERTOS_KERNEL_SMP=0 ) add_library(FreeRTOS-Kernel-Static INTERFACE) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index bf6ff151ac0..d365441aa9c 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -46,6 +46,9 @@ #include "pico/multicore.h" #endif /* LIB_PICO_MULTICORE */ +/* TODO : consider to remove this macro. */ +#define portRUNNING_ON_BOTH_CORES ( configNUM_CORES == portMAX_CORE_COUNT ) + /* Constants required to manipulate the NVIC. */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) @@ -159,9 +162,16 @@ static UBaseType_t uxCriticalNesting; /*-----------------------------------------------------------*/ -#define INVALID_LAUNCH_CORE_NUM 0xffu -static uint8_t ucLaunchCoreNum = INVALID_LAUNCH_CORE_NUM; -#define portIS_FREE_RTOS_CORE() ( ucLaunchCoreNum == get_core_num() ) +#define INVALID_PRIMARY_CORE_NUM 0xffu +/* The primary core number (the own which has the SysTick handler) */ +static uint8_t ucPrimaryCoreNum = INVALID_PRIMARY_CORE_NUM; + +/* Note: portIS_FREE_RTOS_CORE() also returns false until the scheduler is started */ +#if ( portRUNNING_ON_BOTH_CORES == 1 ) + #define portIS_FREE_RTOS_CORE() (ucPrimaryCoreNum != INVALID_PRIMARY_CORE_NUM) +#else + #define portIS_FREE_RTOS_CORE() (ucPrimaryCoreNum == get_core_num()) +#endif /* * See header file for description. @@ -204,6 +214,7 @@ void vPortSVCHandler( void ) void vPortStartFirstTask( void ) { +#if ( configNUM_CORES == 1 ) __asm volatile ( " .syntax unified \n" " ldr r2, =pxCurrentTCB \n"/* Obtain location of pxCurrentTCB. */ @@ -221,6 +232,46 @@ void vPortStartFirstTask( void ) " cpsie i \n"/* The first task has its context and interrupts can be enabled. */ " bx r3 \n"/* Finally, jump to the user defined task code. */ ); +#else + __asm volatile ( + " .syntax unified \n" + #if configRESET_STACK_POINTER + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + #endif /* configRESET_STACK_POINTER */ + #if portRUNNING_ON_BOTH_CORES + " adr r1, ulAsmLocals \n"/* Get the location of the current TCB for the current core. 
*/ + " ldmia r1!, {r2, r3} \n" + " ldr r2, [r2] \n"/* r2 = Core number */ + " lsls r2, #2 \n" + " ldr r3, [r3, r2] \n"/* r3 = pxCurrentTCBs[get_core_num()] */ + #else + " ldr r3, =pxCurrentTCBs \n" + " ldr r3, [r3] \n" /* r3 = pxCurrentTCBs[0] */ + #endif /* portRUNNING_ON_BOTH_CORES */ + " ldr r0, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " movs r0, #2 \n"/* Switch to the psp stack. */ + " msr CONTROL, r0 \n" + " isb \n" + " pop {r0-r5} \n"/* Pop the registers that are saved automatically. */ + " mov lr, r5 \n"/* lr is now in r5. */ + " pop {r3} \n"/* Return address is now in r3. */ + " pop {r2} \n"/* Pop and discard XPSR. */ + " cpsie i \n"/* The first task has its context and interrupts can be enabled. */ + " bx r3 \n"/* Finally, jump to the user defined task code. */ + #if portRUNNING_ON_BOTH_CORES + " \n" + " .align 4 \n" + "ulAsmLocals: \n" + " .word 0xD0000000 \n"/* SIO */ + " .word pxCurrentTCBs \n" + #endif /* portRUNNING_ON_BOTH_CORES */ + ); +#endif } /*-----------------------------------------------------------*/ @@ -230,66 +281,158 @@ void vPortStartFirstTask( void ) /* We must remove the contents (which we don't care about) * to clear the IRQ */ multicore_fifo_drain(); + + /* And explicitly clear any other IRQ flags. */ multicore_fifo_clear_irq(); - BaseType_t xHigherPriorityTaskWoken = pdFALSE; - uint32_t ulSave = spin_lock_blocking( pxCrossCoreSpinLock ); - EventBits_t ulBits = uxCrossCoreEventBits; - uxCrossCoreEventBits &= ~ulBits; - spin_unlock( pxCrossCoreSpinLock, ulSave ); - xEventGroupSetBitsFromISR( xEventGroup, ulBits, &xHigherPriorityTaskWoken ); - portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + + #if ( portRUNNING_ON_BOTH_CORES == 1 ) + portYIELD_FROM_ISR( pdTRUE ); + #elif ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) + BaseType_t xHigherPriorityTaskWoken = pdFALSE; + uint32_t ulSave = spin_lock_blocking( pxCrossCoreSpinLock ); + EventBits_t ulBits = uxCrossCoreEventBits; + uxCrossCoreEventBits &= ~ulBits; + spin_unlock( pxCrossCoreSpinLock, ulSave ); + xEventGroupSetBitsFromISR( xEventGroup, ulBits, &xHigherPriorityTaskWoken ); + portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + #endif /* portRUNNING_ON_BOTH_CORES */ } #endif -/* - * See header file for description. - */ -BaseType_t xPortStartScheduler( void ) -{ - /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ - portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; - portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; - - #if (configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1) - exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler ); - exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler ); - exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler ); - #endif +#if ( configNUM_CORES > 1 ) + /* + * See header file for description. + */ + static BaseType_t xPortStartSchedulerOnCore() + { + if( ucPrimaryCoreNum == get_core_num()) + { + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 ) + exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler ); + #endif + } + + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; - /* Start the timer that generates the tick ISR. 
Interrupts are disabled - * here already. */ - vPortSetupTimerInterrupt(); - - /* Initialise the critical nesting count ready for the first task. */ - uxCriticalNesting = 0; - - ucLaunchCoreNum = get_core_num(); - #if (LIB_PICO_MULTICORE == 1) - #if ( configSUPPORT_PICO_SYNC_INTEROP == 1) - multicore_fifo_clear_irq(); - multicore_fifo_drain(); - uint32_t irq_num = 15 + get_core_num(); - irq_set_priority( irq_num, portMIN_INTERRUPT_PRIORITY ); - irq_set_exclusive_handler( irq_num, prvFIFOInterruptHandler ); - irq_set_enabled( irq_num, 1 ); + #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 ) + exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler ); + exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler ); #endif + + /* Install FIFO handler to receive interrupt from other core */ + multicore_fifo_clear_irq(); + multicore_fifo_drain(); + uint32_t ulIRQNum = SIO_IRQ_PROC0 + get_core_num(); + irq_set_priority( ulIRQNum, portMIN_INTERRUPT_PRIORITY ); + irq_set_exclusive_handler( ulIRQNum, prvFIFOInterruptHandler ); + irq_set_enabled( ulIRQNum, 1 ); + + /* Start the first task. */ + vPortStartFirstTask(); + + /* Should never get here as the tasks will now be executing! Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimisation does not remove the + * symbol. */ + vTaskSwitchContextForCore( portGET_CORE_ID() ); + prvTaskExitError(); + + /* Should not get here! */ + return 0; + } + + #if portRUNNING_ON_BOTH_CORES + static void prvDisableInterruptsAndPortStartSchedulerOnCore( void ) + { + portDISABLE_INTERRUPTS(); + xPortStartSchedulerOnCore(); + } #endif - /* Start the first task. */ - vPortStartFirstTask(); + /* + * See header file for description. + */ + BaseType_t xPortStartScheduler( void ) + { + configASSERT( ucPrimaryCoreNum == INVALID_PRIMARY_CORE_NUM ); - /* Should never get here as the tasks will now be executing! Call the task - * exit error function to prevent compiler warnings about a static function - * not being called in the case that the application writer overrides this - * functionality by defining configTASK_RETURN_ADDRESS. Call - * vTaskSwitchContext() so link time optimisation does not remove the - * symbol. */ - vTaskSwitchContext(); - prvTaskExitError(); + /* No one else should use these! */ + spin_lock_claim( configSMP_SPINLOCK_0 ); + spin_lock_claim( configSMP_SPINLOCK_1 ); + + #if portRUNNING_ON_BOTH_CORES + ucPrimaryCoreNum = configTICK_CORE; + configASSERT( get_core_num() == 0) ; // we must be started on core 0 + multicore_launch_core1( prvDisableInterruptsAndPortStartSchedulerOnCore ); + #else + ucPrimaryCoreNum = get_core_num(); + #endif + xPortStartSchedulerOnCore(); + + /* Should not get here! */ + return 0; + } + +#else + /* + * See header file for description. + */ + BaseType_t xPortStartScheduler( void ) + { + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if (configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1) + exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler ); + exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler ); + exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler ); + #endif + + /* Start the timer that generates the tick ISR. 
Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialise the critical nesting count ready for the first task. */ + uxCriticalNesting = 0; + + ucPrimaryCoreNum = get_core_num(); + #if (LIB_PICO_MULTICORE == 1) + #if ( configSUPPORT_PICO_SYNC_INTEROP == 1) + multicore_fifo_clear_irq(); + multicore_fifo_drain(); + uint32_t irq_num = 15 + get_core_num(); + irq_set_priority( irq_num, portMIN_INTERRUPT_PRIORITY ); + irq_set_exclusive_handler( irq_num, prvFIFOInterruptHandler ); + irq_set_enabled( irq_num, 1 ); + #endif + #endif + + /* Start the first task. */ + vPortStartFirstTask(); + + /* Should never get here as the tasks will now be executing! Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimisation does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here! */ + return 0; + } +#endif - /* Should not get here! */ - return 0; -} /*-----------------------------------------------------------*/ void vPortEndScheduler( void ) @@ -369,12 +512,25 @@ void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask ) ::: "memory" ); } + +/*-----------------------------------------------------------*/ + +void vYieldCore( int xCoreID ) +{ + configASSERT(xCoreID != portGET_CORE_ID()); + #if portRUNNING_ON_BOTH_CORES + /* Non blocking, will cause interrupt on other core if the queue isn't already full, + in which case an IRQ must be pending */ + sio_hw->fifo_wr = 0; + #endif +} + /*-----------------------------------------------------------*/ void xPortPendSVHandler( void ) { /* This is a naked function. */ - +#if ( configNUM_CORES == 1 ) __asm volatile ( " .syntax unified \n" @@ -451,6 +607,100 @@ void xPortPendSVHandler( void ) " \n" " bx r3 \n" ); +#else + __asm volatile + ( + " .syntax unified \n" + " mrs r1, psp \n" + " \n" + " adr r0, ulAsmLocals2 \n"/* Get the location of the current TCB for the current core. */ + " ldmia r0!, {r2, r3} \n" + #if portRUNNING_ON_BOTH_CORES + " ldr r0, [r2] \n"/* r0 = Core number */ + " lsls r0, r0, #2 \n" + " adds r3, r0 \n"/* r3 = &pxCurrentTCBs[get_core_num()] */ + #else + " \n"/* r3 = &pxCurrentTCBs[0] */ + #endif /* portRUNNING_ON_BOTH_CORES */ + " ldr r0, [r3] \n"/* r0 = pxCurrentTCB */ + " \n" + " subs r1, r1, #32 \n"/* Make space for the remaining low registers. */ + " str r1, [r0] \n"/* Save the new top of stack. */ + " stmia r1!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */ + " mov r4, r8 \n"/* Store the high registers. */ + " mov r5, r9 \n" + " mov r6, r10 \n" + " mov r7, r11 \n" + " stmia r1!, {r4-r7} \n" + #if portUSE_DIVIDER_SAVE_RESTORE + /* We expect that the divider is ready at this point (which is + * necessary to safely save/restore), because: + * a) if we have not been interrupted since we entered this method, + * then >8 cycles have clearly passed, so the divider is done + * b) if we were interrupted in the interim, then any "safe" - i.e. + * does the right thing in an IRQ - use of the divider should + * have waited for any in-process divide to complete, saved and + * then fully restored the result, thus the result is ready in + * that case too. 
*/ + " ldr r4, [r2, #0x60] \n"/* SIO_DIV_UDIVIDEND_OFFSET */ + " ldr r5, [r2, #0x64] \n"/* SIO_DIV_UDIVISOR_OFFSET */ + " ldr r6, [r2, #0x74] \n"/* SIO_DIV_REMAINDER_OFFSET */ + " ldr r7, [r2, #0x70] \n"/* SIO_DIV_QUOTIENT_OFFSET */ + /* We actually save the divider state in the 4 words below + * our recorded stack pointer, so as not to disrupt the stack + * frame expected by debuggers - this is addressed by + * portEXTRA_STACK_SIZE */ + " subs r1, r1, #48 \n" + " stmia r1!, {r4-r7} \n" + #endif /* portUSE_DIVIDER_SAVE_RESTORE */ + #if portRUNNING_ON_BOTH_CORES + " ldr r0, [r2] \n"/* r0 = Core number */ + #else + " movs r0, #0 \n" + #endif /* portRUNNING_ON_BOTH_CORES */ + " push {r3, r14} \n" + " cpsid i \n" + " bl vTaskSwitchContextForCore \n" + " cpsie i \n" + " pop {r2, r3} \n"/* lr goes in r3. r2 now holds tcb pointer. */ + " \n" + " ldr r1, [r2] \n" + " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */ + " adds r0, r0, #16 \n"/* Move to the high registers. */ + " ldmia r0!, {r4-r7} \n"/* Pop the high registers. */ + " mov r8, r4 \n" + " mov r9, r5 \n" + " mov r10, r6 \n" + " mov r11, r7 \n" + " \n" + " msr psp, r0 \n"/* Remember the new top of stack for the task. */ + " \n" + #if portUSE_DIVIDER_SAVE_RESTORE + " movs r2, #0xd \n"/* Pop the divider state. */ + " lsls r2, #28 \n" + " subs r0, r0, #48 \n"/* Go back for the divider state */ + " ldmia r0!, {r4-r7} \n"/* Pop the divider state. */ + /* Note always restore via SIO_DIV_UDIVI*, because we will overwrite the + * results stopping the calculation anyway, however the sign of results + * is adjusted by the h/w at read time based on whether the last started + * division was signed and the inputs' signs differed */ + " str r4, [r2, #0x60] \n"/* SIO_DIV_UDIVIDEND_OFFSET */ + " str r5, [r2, #0x64] \n"/* SIO_DIV_UDIVISOR_OFFSET */ + " str r6, [r2, #0x74] \n"/* SIO_DIV_REMAINDER_OFFSET */ + " str r7, [r2, #0x70] \n"/* SIO_DIV_QUOTIENT_OFFSET */ + #else + " subs r0, r0, #32 \n"/* Go back for the low registers that are not automatically restored. */ + #endif /* portUSE_DIVIDER_SAVE_RESTORE */ + " ldmia r0!, {r4-r7} \n"/* Pop low registers. */ + " \n" + " bx r3 \n" + " \n" + " .align 4 \n" + "ulAsmLocals2: \n" + " .word 0xD0000000 \n"/* SIO */ + " .word pxCurrentTCBs \n" + ); +#endif } /*-----------------------------------------------------------*/ From 8e40e576da4572fdff68ef2e31f9fe91eb757457 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 27 Jul 2022 19:34:37 +0800 Subject: [PATCH 046/164] Seperate task state for SMP and single core --- tasks.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/tasks.c b/tasks.c index 2dedf5618b6..26fee139285 100644 --- a/tasks.c +++ b/tasks.c @@ -1799,12 +1799,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxTCB ); - if( taskTASK_IS_RUNNING( pxTCB ) ) - { - /* The task calling this function is querying its own state. */ - eReturn = eRunning; - } - else + #if ( configNUM_CORES == 1 ) + if( taskTASK_IS_RUNNING( pxTCB ) ) + { + /* The task calling this function is querying its own state. */ + eReturn = eRunning; + } + else + #endif { taskENTER_CRITICAL(); { @@ -1876,7 +1878,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { /* If the task is not in any other state, it must be in the * Ready (including pending ready) state. */ - eReturn = eReady; + #if ( configNUM_CORES > 1 ) + if( taskTASK_IS_RUNNING( pxTCB ) ) + { + /* Is it actively running on a core? 
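 * (On an SMP build more than one task can legitimately be in the eRunning
 * state at the same time, one per core.) A caller might test for this as
 * in the sketch below, where xWorkerHandle is a hypothetical task handle:
 *
 * if( eTaskGetState( xWorkerHandle ) == eRunning )
 * {
 *     // The task is executing on one of the cores right now.
 * }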
*/ + eReturn = eRunning; + } + else + #endif + { + eReturn = eReady; + } } } From 3fb87f52512cf93cee41f1e46d06b1ccb39575e2 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 27 Jul 2022 20:13:24 +0800 Subject: [PATCH 047/164] Merge configRUN_MULTIPLE_PRIORITIES from SMP branch --- include/FreeRTOS.h | 4 ++ tasks.c | 145 ++++++++++++++++++++++++++++++++++++--------- 2 files changed, 121 insertions(+), 28 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index ca511693714..a20bd40f3d9 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -297,6 +297,10 @@ #define configNUM_CORES 1 #endif +#ifndef configRUN_MULTIPLE_PRIORITIES + #define configRUN_MULTIPLE_PRIORITIES 0 +#endif + #if ( configNUM_CORES > 1 ) #if portCRITICAL_NESTING_IN_TCB == 0 #error portCRITICAL_NESTING_IN_TCB is required in SMP diff --git a/tasks.c b/tasks.c index 26fee139285..6a3ae23d910 100644 --- a/tasks.c +++ b/tasks.c @@ -793,54 +793,87 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, BaseType_t xLowestPriority; BaseType_t xTaskPriority; BaseType_t xLowestPriorityCore = ( ( BaseType_t ) -1 ); /* Negative value to indicate no yielding required. */ + BaseType_t xYieldCount = 0; BaseType_t xCoreID; /* This must be called from a critical section. */ configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); - xLowestPriority = ( BaseType_t ) pxTCB->uxPriority; - - if( xPreemptEqualPriority == pdFALSE ) - { - /* xLowestPriority will be decremented to -1 if the priority of pxTCB - * is 0. This is ok as we will give system idle tasks a priority of -1 below. */ - --xLowestPriority; - } - - for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + /* No task should yield for this one if it is a lower priority + * than priority level of currently ready tasks. */ + if( pxTCB->uxPriority >= uxTopReadyPriority ) + #endif { - xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority; + xLowestPriority = ( BaseType_t ) pxTCB->uxPriority; - /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. */ - if( pxCurrentTCBs[ xCoreID ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + if( xPreemptEqualPriority == pdFALSE ) { - xTaskPriority = xTaskPriority - 1; + /* xLowestPriority will be decremented to -1 if the priority of pxTCB + * is 0. This is ok as we will give system idle tasks a priority of -1 below. */ + --xLowestPriority; } - if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) ) + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) { - if( xTaskPriority <= xLowestPriority ) + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority; + + /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. */ + if( pxCurrentTCBs[ xCoreID ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + { + xTaskPriority = xTaskPriority - 1; + } + + if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) ) { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = xCoreID; + if( xTaskPriority <= xLowestPriority ) + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = xCoreID; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + /* Yield all currently running non-idle tasks with a priority lower than + * the task that needs to run. 
*/ + if( ( ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) < xTaskPriority ) && ( xTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) ) + { + prvYieldCore( xCoreID ); + xYieldCount++; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) && 1 */ } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - if( taskVALID_CORE_ID( xLowestPriorityCore ) ) - { - if( xYieldForTask == pdTRUE ) + if( ( xYieldCount == 0 ) && taskVALID_CORE_ID( xLowestPriorityCore ) ) { - prvYieldCore( xLowestPriorityCore ); + if( xYieldForTask == pdTRUE ) + { + prvYieldCore( xLowestPriorityCore ); + } } + + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + /* Verify that the calling core always yields to higher priority tasks. */ + if( ( pxCurrentTCBs[ portGET_CORE_ID() ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) == pdFALSE && + ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) ) + { + configASSERT( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE || taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ); + } + #endif } return xLowestPriorityCore; @@ -857,9 +890,24 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, UBaseType_t uxCurrentPriority = uxTopReadyPriority; BaseType_t xTaskScheduled = pdFALSE; BaseType_t xDecrementTopPriority = pdTRUE; + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + BaseType_t xPriorityDropped = pdFALSE; + #endif while( xTaskScheduled == pdFALSE ) { + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + if( uxCurrentPriority < uxTopReadyPriority ) + { + /* We can't schedule any tasks, other than idle, that have a + * priority lower than the priority of a task currently running + * on another core. */ + uxCurrentPriority = tskIDLE_PRIORITY; + } + } + #endif + if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) { List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] ); @@ -888,6 +936,21 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, pxTCB = pxTaskItem->pvOwner; + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + /* When falling back to the idle priority because only one priority + * level is allowed to run at a time, we should ONLY schedule the true + * idle tasks, not user tasks at the idle priority. */ + if( uxCurrentPriority < uxTopReadyPriority ) + { + if( ( pxTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) == pdFALSE ) + { + continue; + } + } + } + #endif /* if( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { /* If the task is not being executed by any core swap it in. */ @@ -925,6 +988,11 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, if( xDecrementTopPriority != pdFALSE ) { uxTopReadyPriority--; + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + xPriorityDropped = pdTRUE; + } + #endif } } @@ -940,7 +1008,28 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, uxCurrentPriority--; } - configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ); + if( xTaskScheduled == pdTRUE ) + { + configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ); + + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + if( xPriorityDropped != pdFALSE ) + { + /* There may be several ready tasks that were being prevented from running because there was + * a higher priority task running. Now that the last of the higher priority tasks is no longer + * running, make sure all the other idle tasks yield. 
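 * Forcing every core that is still running a true idle task back through
 * the scheduler here gives the lower priority tasks, which were previously
 * held off the other cores because configRUN_MULTIPLE_PRIORITIES is 0, a
 * chance to be selected on that core's next pass.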
*/ + UBaseType_t x; + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ) + { + if( pxCurrentTCBs[ x ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + { + prvYieldCore( x ); + } + } + } + #endif /* if( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + } return xTaskScheduled; } From 6ac175710908ede0763dc7cc9d86d55b32921b6a Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Fri, 29 Jul 2022 11:00:45 +0800 Subject: [PATCH 048/164] Merge configUSE_TASK_PREEMPTION_DISABLE from SMP --- include/FreeRTOS.h | 19 ++++++++++ include/task.h | 66 ++++++++++++++++++++++++++++++++ tasks.c | 93 ++++++++++++++++++++++++++++++++++++++++------ 3 files changed, 167 insertions(+), 11 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index a20bd40f3d9..2e1ba6ab549 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -246,6 +246,10 @@ #define configUSE_COUNTING_SEMAPHORES 0 #endif +#ifndef configUSE_TASK_PREEMPTION_DISABLE + #define configUSE_TASK_PREEMPTION_DISABLE 0 +#endif + #ifndef configUSE_ALTERNATIVE_API #define configUSE_ALTERNATIVE_API 0 #endif @@ -1036,6 +1040,18 @@ #error configUSE_MUTEXES must be set to 1 to use recursive mutexes #endif +#if( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) + #error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use task preemption disable +#endif + +#if( ( configUSE_PREEMPTION == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) + #error configUSE_PREEMPTION must be set to 1 to use task preemption disable +#endif + +#if( ( configNUM_CORES == 1 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) + #error configUSE_TASK_PREEMPTION_DISABLE is not supported in single core FreeRTOS +#endif + #ifndef configINITIAL_TICK_COUNT #define configINITIAL_TICK_COUNT 0 #endif @@ -1292,6 +1308,9 @@ typedef struct xSTATIC_TCB BaseType_t xDummy23[ 2 ]; #endif uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xDummy24; + #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; #endif diff --git a/include/task.h b/include/task.h index 2db17f547c0..dc77ac2acb4 100644 --- a/include/task.h +++ b/include/task.h @@ -1225,6 +1225,72 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; */ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + /** + * @brief Disables preemption for a task. + * + * @param xTask The handle of the task to disable preemption. Passing NULL + * disables preemption for the calling task. + * + * Example usage: + * + * void vTaskCode( void *pvParameters ) + * { + * // Silence warnings about unused parameters. + * ( void ) pvParameters; + * + * for( ;; ) + * { + * // ... Perform some function here. + * + * // Disable preemption for this task. + * vTaskPreemptionDisable( NULL ); + * + * // The task will not be preempted when it is executing in this portion ... + * + * // ... until the preemption is enabled again. + * vTaskPreemptionEnable( NULL ); + * + * // The task can be preempted when it is executing in this portion. + * } + * } + */ + void vTaskPreemptionDisable( const TaskHandle_t xTask ); +#endif + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + /** + * @brief Enables preemption for a task. + * + * @param xTask The handle of the task to enable preemption. Passing NULL + * enables preemption for the calling task. 
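Illustrative aside (not part of the patch): a configuration that satisfies the compile-time checks added to FreeRTOS.h above could look as follows. Only the settings enforced by those #error checks are fixed; the remaining values are examples.

    /* Example FreeRTOSConfig.h fragment for configUSE_TASK_PREEMPTION_DISABLE - illustrative. */
    #define configUSE_PREEMPTION                 1   /* Required by the #error check. */
    #define configNUM_CORES                      2   /* Must be greater than 1. */
    #define configRUN_MULTIPLE_PRIORITIES        1   /* Required by the #error check. */
    #define configUSE_TASK_PREEMPTION_DISABLE    1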
+ * + * Example usage: + * + * void vTaskCode( void *pvParameters ) + * { + * // Silence warnings about unused parameters. + * ( void ) pvParameters; + * + * for( ;; ) + * { + * // ... Perform some function here. + * + * // Disable preemption for this task. + * vTaskPreemptionDisable( NULL ); + * + * // The task will not be preempted when it is executing in this portion ... + * + * // ... until the preemption is enabled again. + * vTaskPreemptionEnable( NULL ); + * + * // The task can be preempted when it is executing in this portion. + * } + * } + */ + void vTaskPreemptionEnable( const TaskHandle_t xTask ); +#endif + /*----------------------------------------------------------- * SCHEDULER CONTROL *----------------------------------------------------------*/ diff --git a/tasks.c b/tasks.c index 6a3ae23d910..5f742aadc8d 100644 --- a/tasks.c +++ b/tasks.c @@ -300,6 +300,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #endif char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xPreemptionDisable; /*< Used to prevent the task from being preempted */ + #endif + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */ #endif @@ -828,8 +832,13 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, { if( xTaskPriority <= xLowestPriority ) { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = xCoreID; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = xCoreID; + } } else { @@ -1439,6 +1448,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + { + pxNewTCB->xPreemptionDisable = 0; + } + #endif + /* Initialize the TCB stack to look as if the task was already running, * but had been interrupted by the scheduler. The return address is set * to the start of the task function. Once the stack has been initialised @@ -2109,7 +2124,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* Setting the priority of a running task down means * there may now be another task of higher priority that * is ready to execute. 
*/ - xYieldRequired = pdTRUE; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxTCB->xPreemptionDisable == pdFALSE ) + #endif + { + xYieldRequired = pdTRUE; + } } else { @@ -2234,6 +2254,52 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* INCLUDE_vTaskPrioritySet */ /*-----------------------------------------------------------*/ +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + void vTaskPreemptionDisable( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->xPreemptionDisable = pdTRUE; + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_TASK_PREEMPTION_DISABLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + void vTaskPreemptionEnable( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->xPreemptionDisable = pdFALSE; + + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB ) ) + { + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + prvYieldCore( xCoreID ); + } + } + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_TASK_PREEMPTION_DISABLE */ +/*-----------------------------------------------------------*/ + #if ( INCLUDE_vTaskSuspend == 1 ) void vTaskSuspend( TaskHandle_t xTaskToSuspend ) @@ -3730,21 +3796,26 @@ BaseType_t xTaskIncrementTick( void ) for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) { - if( xCoreYieldList[ x ] != pdFALSE ) + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) + #endif { - if( x == ( UBaseType_t ) xCoreID ) + if( xCoreYieldList[ x ] != pdFALSE ) { - xSwitchRequired = pdTRUE; + if( x == ( UBaseType_t ) xCoreID ) + { + xSwitchRequired = pdTRUE; + } + else + { + prvYieldCore( x ); + } } else { - prvYieldCore( x ); + mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } } } #endif /* ( configNUM_CORES == 1 ) */ From a7503ba9cd2af8084c92a56e93305f08ecc22d9a Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 2 Aug 2022 17:27:03 +0800 Subject: [PATCH 049/164] Merge configUSE_CORE_AFFINITY from SMP --- include/FreeRTOS.h | 3 + include/task.h | 127 +++++++++++++++++++++++ tasks.c | 245 ++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 363 insertions(+), 12 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 2e1ba6ab549..46325477bc8 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1301,6 +1301,9 @@ typedef struct xSTATIC_TCB #if ( portUSING_MPU_WRAPPERS == 1 ) xMPU_SETTINGS xDummy2; #endif + #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) + UBaseType_t uxDummy25; + #endif StaticListItem_t xDummy3[ 2 ]; UBaseType_t uxDummy5; void * pxDummy6; diff --git a/include/task.h b/include/task.h index dc77ac2acb4..18b454abac5 100644 --- a/include/task.h +++ b/include/task.h @@ -185,6 +185,13 @@ typedef enum */ #define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) +/** + * Defines affinity to all available cores. + * + * \ingroup TaskUtils + */ +#define tskNO_AFFINITY ( ( UBaseType_t ) -1U ) + /** * task. 
h * @@ -362,6 +369,16 @@ typedef enum TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; #endif +#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + /** * task. h * @code{c} @@ -480,6 +497,17 @@ typedef enum StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; #endif /* configSUPPORT_STATIC_ALLOCATION */ +#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer, + UBaseType_t uxCoreAffinityMask ) PRIVILEGED_FUNCTION; +#endif + /** * task. h * @code{c} @@ -558,6 +586,12 @@ typedef enum TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; #endif +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + /** * task. h * @code{c} @@ -648,6 +682,12 @@ typedef enum TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; #endif +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + /** * task. h * @code{c} @@ -1225,6 +1265,93 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; */ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; +#if ( configUSE_CORE_AFFINITY == 1 ) + /** + * @brief Sets the core affinity mask for a task. + * + * It sets the cores on which a task can run. configUSE_CORE_AFFINITY must + * be defined as 1 for this function to be available. + * + * @param xTask The handle of the task to set the core affinity mask for. + * Passing NULL will set the core affinity mask for the calling task. + * + * @param uxCoreAffinityMask A bitwise value that indicates the cores on + * which the task can run. Cores are numbered from 0 to configNUM_CORES - 1. + * For example, to ensure that a task can run on core 0 and core 1, set + * uxCoreAffinityMask to 0x03. + * + * Example usage: + * + * // The function that creates task. + * void vAFunction( void ) + * { + * TaskHandle_t xHandle; + * UBaseType_t uxCoreAffinityMask; + * + * // Create a task, storing the handle. + * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); + * + * // Define the core affinity mask such that this task can only run + * // on core 0 and core 2. + * uxCoreAffinityMask = ( ( 1 << 0 ) | ( 1 << 2 ) ); + * + * //Set the core affinity mask for the task. 
+ * vTaskCoreAffinitySet( xHandle, uxCoreAffinityMask ); + * } + */ + void vTaskCoreAffinitySet( const TaskHandle_t xTask, UBaseType_t uxCoreAffinityMask ); +#endif + +#if ( configUSE_CORE_AFFINITY == 1 ) + /** + * @brief Gets the core affinity mask for a task. + * + * configUSE_CORE_AFFINITY must be defined as 1 for this function to be + * available. + * + * @param xTask The handle of the task to get the core affinity mask for. + * Passing NULL will get the core affinity mask for the calling task. + * + * @return The core affinity mask which is a bitwise value that indicates + * the cores on which a task can run. Cores are numbered from 0 to + * configNUM_CORES - 1. For example, if a task can run on core 0 and core 1, + * the core affinity mask is 0x03. + * + * Example usage: + * + * // Task handle of the networking task - it is populated elsewhere. + * TaskHandle_t xNetworkingTaskHandle; + * + * void vAFunction( void ) + * { + * TaskHandle_t xHandle; + * UBaseType_t uxNetworkingCoreAffinityMask; + * + * // Create a task, storing the handle. + * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); + * + * //Get the core affinity mask for the networking task. + * uxNetworkingCoreAffinityMask = vTaskCoreAffinityGet( xNetworkingTaskHandle ); + * + * // Here is a hypothetical scenario, just for the example. Assume that we + * // have 2 cores - Core 0 and core 1. We want to pin the application task to + * // the core different than the networking task to ensure that the + * // application task does not interfere with networking. + * if( ( uxNetworkingCoreAffinityMask & ( 1 << 0 ) ) != 0 ) + * { + * // The networking task can run on core 0, pin our task to core 1. + * vTaskCoreAffinitySet( xHandle, ( 1 << 1 ) ); + * } + * else + * { + * // Otherwise, pin our task to core 0. + * vTaskCoreAffinitySet( xHandle, ( 1 << 0 ) ); + * } + * } + */ + UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ); +#endif + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) /** * @brief Disables preemption for a task. diff --git a/tasks.c b/tasks.c index 5f742aadc8d..9267ea4b641 100644 --- a/tasks.c +++ b/tasks.c @@ -290,6 +290,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ #endif + #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) + UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have >= the same number of bits as SMP confNUM_CORES */ + #endif + ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. 
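Illustrative aside (not part of the patch): a task can also be given its affinity mask at creation time with the xTaskCreateAffinitySet() prototype declared above. A minimal sketch, assuming configNUM_CORES > 1 and configUSE_CORE_AFFINITY == 1; vTaskCode and STACK_SIZE are placeholder names.

    TaskHandle_t xHandle = NULL;

    /* vTaskCode and STACK_SIZE are placeholders - create a task that may only run on core 0. */
    xTaskCreateAffinitySet( vTaskCode, "NAME", STACK_SIZE, NULL,
                            tskIDLE_PRIORITY + 1, ( 1 << 0 ), &xHandle );

    /* tskNO_AFFINITY allows the task to run on any available core. */
    xTaskCreateAffinitySet( vTaskCode, "NAME", STACK_SIZE, NULL,
                            tskIDLE_PRIORITY + 1, tskNO_AFFINITY, &xHandle );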
*/ @@ -832,12 +836,17 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, { if( xTaskPriority <= xLowestPriority ) { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) #endif { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = xCoreID; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = xCoreID; + } } } else @@ -899,6 +908,9 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, UBaseType_t uxCurrentPriority = uxTopReadyPriority; BaseType_t xTaskScheduled = pdFALSE; BaseType_t xDecrementTopPriority = pdTRUE; + #if ( configUSE_CORE_AFFINITY == 1 ) + TCB_t * pxPreviousTCB = NULL; + #endif #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) BaseType_t xPriorityDropped = pdFALSE; #endif @@ -962,19 +974,32 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { - /* If the task is not being executed by any core swap it in. */ - pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING; - pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; - pxCurrentTCBs[ xCoreID ] = pxTCB; - xTaskScheduled = pdTRUE; + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + #endif + { + /* If the task is not being executed by any core swap it in. */ + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING; + #if ( configUSE_CORE_AFFINITY == 1 ) + pxPreviousTCB = pxCurrentTCBs[ xCoreID ]; + #endif + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + pxCurrentTCBs[ xCoreID ] = pxTCB; + xTaskScheduled = pdTRUE; + } } else if( pxTCB == pxCurrentTCBs[ xCoreID ] ) { configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); - /* The task is already running on this core, mark it as scheduled. */ - pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; - xTaskScheduled = pdTRUE; + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + #endif + { + /* The task is already running on this core, mark it as scheduled. */ + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + xTaskScheduled = pdTRUE; + } } else { @@ -1040,6 +1065,71 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, #endif /* if( configRUN_MULTIPLE_PRIORITIES == 0 ) */ } + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) + { + /* A ready task was just bumped off this core. Look at the cores it can run from + * to see if it is able to run on any of them. */ + UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; + BaseType_t xLowestPriority = pxPreviousTCB->uxPriority; + BaseType_t xLowestPriorityCore = -1; + + if( pxPreviousTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + { + xLowestPriority = xLowestPriority - 1; + } + + if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) + { + /* The ready task that was removed from this core is not excluded from it. + * Only look at the intersection of the cores the removed task is allowed to run + * on with the cores that the new task is excluded from. It is possible that the + * new task was only placed onto this core because it is excluded from another. 
+ * Check to see if the previous task could run on one of those cores. */ + uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); + } + else + { + /* The ready task that was removed from this core is excluded from it. */ + } + + uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); + + while( uxCoreMap != 0 ) + { + uint32_t uxCore; + BaseType_t xTaskPriority; + + uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); + configASSERT( taskVALID_CORE_ID( uxCore ) ); + + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; + if( pxCurrentTCBs[ uxCore ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + { + xTaskPriority = xTaskPriority - ( BaseType_t ) 1; + } + + uxCoreMap &= ~( 1 << uxCore ); + + if( ( xTaskPriority < xLowestPriority ) && ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && ( xYieldPendings[ uxCore ] == pdFALSE ) ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = uxCore; + } + } + } + + if( taskVALID_CORE_ID( xLowestPriorityCore ) ) + { + prvYieldCore( xLowestPriorityCore ); + } + } + #endif /* if ( configUSE_CORE_AFFINITY == 1 ) */ + return xTaskScheduled; } @@ -1056,6 +1146,20 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, UBaseType_t uxPriority, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateStaticAffinitySet(pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY); + } + + TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer, + UBaseType_t uxCoreAffinityMask ) + #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ { TCB_t * pxNewTCB; TaskHandle_t xReturn; @@ -1091,6 +1195,14 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it. 
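Illustrative aside (not part of the patch): the statically allocated variant takes the affinity mask as its final parameter and returns the task handle directly. A minimal sketch, assuming configSUPPORT_STATIC_ALLOCATION == 1 and using vTaskCode as a placeholder task function:

    static StackType_t uxTaskStack[ 256 ];
    static StaticTask_t xTaskBuffer;
    TaskHandle_t xHandle;

    /* Create a statically allocated task that may run on core 0 or core 1. */
    xHandle = xTaskCreateStaticAffinitySet( vTaskCode, "NAME", 256, NULL,
                                            tskIDLE_PRIORITY + 1,
                                            uxTaskStack, &xTaskBuffer,
                                            ( ( 1 << 0 ) | ( 1 << 1 ) ) );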
*/ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + prvAddNewTaskToReadyList( pxNewTCB ); } else @@ -1108,6 +1220,15 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t * pxCreatedTask ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); + } + + BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) + #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ { TCB_t * pxNewTCB; BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; @@ -1142,6 +1263,13 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, pxCreatedTask, pxNewTCB, pxTaskDefinition->xRegions ); + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + prvAddNewTaskToReadyList( pxNewTCB ); xReturn = pdPASS; } @@ -1156,6 +1284,15 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t * pxCreatedTask ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateRestrictedAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); + } + + BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) + #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ { TCB_t * pxNewTCB; BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; @@ -1193,6 +1330,13 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, pxCreatedTask, pxNewTCB, pxTaskDefinition->xRegions ); + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + prvAddNewTaskToReadyList( pxNewTCB ); xReturn = pdPASS; } @@ -1212,6 +1356,19 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateAffinitySet(pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, tskNO_AFFINITY, pxCreatedTask); + } + + BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * const pxCreatedTask ) + #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ { TCB_t * pxNewTCB; BaseType_t xReturn; @@ -1287,6 +1444,14 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + prvAddNewTaskToReadyList( pxNewTCB ); xReturn = pdPASS; } @@ -1448,6 +1613,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; + } + #endif + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) { pxNewTCB->xPreemptionDisable = 0; @@ -2254,6 +2425,56 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* INCLUDE_vTaskPrioritySet */ /*-----------------------------------------------------------*/ +#if ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) + void vTaskCoreAffinitySet( const TaskHandle_t xTask, + UBaseType_t uxCoreAffinityMask ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->uxCoreAffinityMask = uxCoreAffinityMask; + + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB ) ) + { + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + + if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) == 0 ) + { + prvYieldCore( xCoreID ); + } + } + } + } + taskEXIT_CRITICAL(); + } +#endif /* if ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) + UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + UBaseType_t uxCoreAffinityMask; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; + } + taskEXIT_CRITICAL(); + + return uxCoreAffinityMask; + } +#endif /* if ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + +/*-----------------------------------------------------------*/ + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) void vTaskPreemptionDisable( const TaskHandle_t xTask ) From 182a9f8a961ec24ab8f8629114ed4a116e9ea380 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 4 Aug 2022 17:32:00 +0800 Subject: [PATCH 050/164] Update pxYieldSpinLocks to per-cpu variable in SMP --- portable/ThirdParty/GCC/RP2040/port.c | 47 ++++++++++++++++++--------- 1 file changed, 31 insertions(+), 16 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index d365441aa9c..e03f3dc30da 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -133,7 +133,7 @@ static UBaseType_t uxCriticalNesting; static spin_lock_t * pxCrossCoreSpinLock; #endif /* LIB_PICO_MULTICORE */ - static spin_lock_t * pxYieldSpinLock; + static spin_lock_t * pxYieldSpinLocks[ configNUM_CORES ]; static uint32_t ulYieldSpinLockSaveValue; #endif /* configSUPPORT_PICO_SYNC_INTEROP */ @@ -444,10 +444,13 @@ void vPortEndScheduler( 
void ) void vPortYield( void ) { - #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) + #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) && ( confgNUM_CORES == 1 ) /* We are not in an ISR, and pxYieldSpinLock is always dealt with and - * cleared interrupts are re-enabled, so should be NULL */ - configASSERT( pxYieldSpinLock == NULL ); + * cleared interrupts are re-enabled, so should be NULL. + * + * This should only be checked with interrupt disabled in SMP. + */ + configASSERT( pxYieldSpinLocks[ get_core_num() ] == NULL ); #endif /* configSUPPORT_PICO_SYNC_INTEROP */ /* Set a PendSV to request a context switch. */ @@ -482,10 +485,14 @@ void vPortExitCritical( void ) void vPortEnableInterrupts() { #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) - if( pxYieldSpinLock ) { - spin_unlock(pxYieldSpinLock, ulYieldSpinLockSaveValue); - pxYieldSpinLock = NULL; + BaseType_t xCoreID = get_core_num(); + spin_lock_t * pxYieldSpinLock = pxYieldSpinLocks[ xCoreID ]; + if( pxYieldSpinLocks[ xCoreID ] ) + { + pxYieldSpinLocks[ xCoreID ] = NULL; + spin_unlock( pxYieldSpinLock, ulYieldSpinLockSaveValue ); + } } #endif __asm volatile ( " cpsie i " ::: "memory" ); @@ -982,19 +989,23 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } else { - configASSERT( pxYieldSpinLock == NULL ); + BaseType_t xCoreID = get_core_num(); + configASSERT( pxYieldSpinLocks[ xCoreID ] == NULL ); // we want to hold the lock until the event bits have been set; since interrupts are currently disabled // by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when // the scheduler is unlocked during this call configASSERT(pxLock->spin_lock); - pxYieldSpinLock = pxLock->spin_lock; + pxYieldSpinLocks[ xCoreID ] = pxLock->spin_lock; ulYieldSpinLockSaveValue = ulSave; xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit(pxLock->spin_lock), pdTRUE, pdFALSE, portMAX_DELAY); - /* sanity check that interrupts were disabled, then re-enabled during the call, which will have - * taken care of the yield */ - configASSERT( pxYieldSpinLock == NULL); + + #if ( configNUM_CORES == 1 ) + /* sanity check that interrupts were disabled, then re-enabled during the call, which will have + * taken care of the yield. This should be checked with interrupt were disabled in SMP. */ + configASSERT( pxYieldSpinLocks[ xCoreID ] == NULL ); + #endif } } @@ -1052,7 +1063,8 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } else { - configASSERT( pxYieldSpinLock == NULL ); + BaseType_t xCoreID = get_core_num(); + configASSERT( pxYieldSpinLocks[ xCoreID ] == NULL ); TickType_t uxTicksToWait = prvGetTicksToWaitBefore( uxUntil ); if( uxTicksToWait ) @@ -1061,14 +1073,17 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) * by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when * the scheduler is unlocked during this call */ configASSERT(pxLock->spin_lock); - pxYieldSpinLock = pxLock->spin_lock; + pxYieldSpinLocks[ xCoreID ] = pxLock->spin_lock; ulYieldSpinLockSaveValue = ulSave; xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit(pxLock->spin_lock), pdTRUE, pdFALSE, uxTicksToWait ); + + #if ( configNUM_CORES == 1 ) /* sanity check that interrupts were disabled, then re-enabled during the call, which will have - * taken care of the yield */ - configASSERT( pxYieldSpinLock == NULL ); + * taken care of the yield. This should be checked with interrupt were disabled in SMP. 
*/ + configASSERT( pxYieldSpinLocks[ xCoreID ] == NULL ); + #endif } else { From 4df96ed17e4e1ab25998272163b054f73a9843d9 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Wed, 31 Aug 2022 10:36:32 +0530 Subject: [PATCH 051/164] Remove TODO log --- tasks.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tasks.c b/tasks.c index 9267ea4b641..98d4cc66145 100644 --- a/tasks.c +++ b/tasks.c @@ -4885,7 +4885,6 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) /* A task was made ready while the scheduler was suspended. */ eReturn = eAbortSleep; } - /* SMP_TODO : fix this with other commit. */ else if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { /* A yield was pended while the scheduler was suspended. */ From 91de74c72ee4c61471df1287789589cfa5034821 Mon Sep 17 00:00:00 2001 From: alfred gedeon <28123637+alfred2g@users.noreply.github.com> Date: Thu, 2 Jun 2022 00:00:10 +0200 Subject: [PATCH 052/164] Add suppport for ARM CM55 (#494) * Add supposrt for ARM CM55 * Fix file header * Remove duplicate code * Refactor portmacro.h 1. portmacro.h is re-factored into 2 parts - portmacrocommon.h which is common to all ARMv8-M ports and portmacro.h which is different for different compiler and architecture. This enables us to provide Cortex-M55 ports without code duplication. 2. Update copy_files.py so that it copies Cortex-M55 ports correctly - all files except portmacro.h are used from Cortex-M33 ports. Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal --- .github/lexicon.txt | 1 + include/FreeRTOS.h | 6 + portable/ARMv8M/ReadMe.txt | 13 +- portable/ARMv8M/copy_files.py | 106 +- portable/ARMv8M/non_secure/ReadMe.txt | 16 +- .../portable/GCC/ARM_CM23/portmacro.h | 274 +--- .../portable/GCC/ARM_CM23_NTZ/portmacro.h | 274 +--- .../portable/GCC/ARM_CM33/portasm.c | 16 +- .../portable/GCC/ARM_CM33/portmacro.h | 275 +--- .../portable/GCC/ARM_CM33_NTZ/portasm.c | 16 +- .../portable/GCC/ARM_CM33_NTZ/portmacro.h | 275 +--- .../portable/GCC/ARM_CM55/portmacro.h | 71 + .../portable/IAR/ARM_CM23/portmacro.h | 278 +--- .../portable/IAR/ARM_CM23_NTZ/portmacro.h | 278 +--- .../portable/IAR/ARM_CM33/portasm.s | 16 +- .../portable/IAR/ARM_CM33/portmacro.h | 278 +--- .../portable/IAR/ARM_CM33_NTZ/portasm.s | 16 +- .../portable/IAR/ARM_CM33_NTZ/portmacro.h | 278 +--- .../portable/IAR/ARM_CM55/portmacro.h | 83 ++ portable/ARMv8M/non_secure/portmacrocommon.h | 311 +++++ portable/ARMv8M/secure/ReadMe.txt | 15 +- .../GCC/ARM_CM33/secure_context_port.c | 8 +- .../IAR/ARM_CM33/secure_context_port_asm.s | 8 +- portable/GCC/ARM_CM23/non_secure/portmacro.h | 274 +--- .../GCC/ARM_CM23/non_secure/portmacrocommon.h | 311 +++++ .../GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 274 +--- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 311 +++++ portable/GCC/ARM_CM33/non_secure/portasm.c | 16 +- portable/GCC/ARM_CM33/non_secure/portmacro.h | 275 +--- .../GCC/ARM_CM33/non_secure/portmacrocommon.h | 311 +++++ .../GCC/ARM_CM33/secure/secure_context_port.c | 8 +- .../GCC/ARM_CM33_NTZ/non_secure/portasm.c | 16 +- .../GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 275 +--- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 311 +++++ portable/GCC/ARM_CM55/non_secure/port.c | 1203 +++++++++++++++++ portable/GCC/ARM_CM55/non_secure/portasm.c | 452 +++++++ portable/GCC/ARM_CM55/non_secure/portasm.h | 114 ++ portable/GCC/ARM_CM55/non_secure/portmacro.h | 71 + .../GCC/ARM_CM55/non_secure/portmacrocommon.h | 311 +++++ portable/GCC/ARM_CM55/secure/secure_context.c | 351 +++++ portable/GCC/ARM_CM55/secure/secure_context.h | 135 ++ 
.../GCC/ARM_CM55/secure/secure_context_port.c | 97 ++ portable/GCC/ARM_CM55/secure/secure_heap.c | 451 ++++++ portable/GCC/ARM_CM55/secure/secure_heap.h | 66 + portable/GCC/ARM_CM55/secure/secure_init.c | 106 ++ portable/GCC/ARM_CM55/secure/secure_init.h | 54 + .../GCC/ARM_CM55/secure/secure_port_macros.h | 140 ++ portable/GCC/ARM_CM55_NTZ/non_secure/port.c | 1203 +++++++++++++++++ .../GCC/ARM_CM55_NTZ/non_secure/portasm.c | 351 +++++ .../GCC/ARM_CM55_NTZ/non_secure/portasm.h | 114 ++ .../GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 71 + .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 311 +++++ portable/IAR/ARM_CM23/non_secure/portmacro.h | 278 +--- .../IAR/ARM_CM23/non_secure/portmacrocommon.h | 311 +++++ .../IAR/ARM_CM23_NTZ/non_secure/portmacro.h | 278 +--- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 311 +++++ portable/IAR/ARM_CM33/non_secure/portasm.s | 16 +- portable/IAR/ARM_CM33/non_secure/portmacro.h | 278 +--- .../IAR/ARM_CM33/non_secure/portmacrocommon.h | 311 +++++ .../ARM_CM33/secure/secure_context_port_asm.s | 8 +- .../IAR/ARM_CM33_NTZ/non_secure/portasm.s | 16 +- .../IAR/ARM_CM33_NTZ/non_secure/portmacro.h | 278 +--- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 311 +++++ portable/IAR/ARM_CM55/non_secure/port.c | 1203 +++++++++++++++++ portable/IAR/ARM_CM55/non_secure/portasm.h | 114 ++ portable/IAR/ARM_CM55/non_secure/portasm.s | 353 +++++ portable/IAR/ARM_CM55/non_secure/portmacro.h | 83 ++ .../IAR/ARM_CM55/non_secure/portmacrocommon.h | 311 +++++ portable/IAR/ARM_CM55/secure/secure_context.c | 351 +++++ portable/IAR/ARM_CM55/secure/secure_context.h | 135 ++ .../ARM_CM55/secure/secure_context_port_asm.s | 86 ++ portable/IAR/ARM_CM55/secure/secure_heap.c | 451 ++++++ portable/IAR/ARM_CM55/secure/secure_heap.h | 66 + portable/IAR/ARM_CM55/secure/secure_init.c | 106 ++ portable/IAR/ARM_CM55/secure/secure_init.h | 54 + .../IAR/ARM_CM55/secure/secure_port_macros.h | 140 ++ portable/IAR/ARM_CM55_NTZ/non_secure/port.c | 1203 +++++++++++++++++ .../IAR/ARM_CM55_NTZ/non_secure/portasm.h | 114 ++ .../IAR/ARM_CM55_NTZ/non_secure/portasm.s | 262 ++++ .../IAR/ARM_CM55_NTZ/non_secure/portmacro.h | 83 ++ .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 311 +++++ 81 files changed, 14386 insertions(+), 4331 deletions(-) create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h create mode 100644 portable/ARMv8M/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM23/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM33/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM55/non_secure/port.c create mode 100644 portable/GCC/ARM_CM55/non_secure/portasm.c create mode 100644 portable/GCC/ARM_CM55/non_secure/portasm.h create mode 100644 portable/GCC/ARM_CM55/non_secure/portmacro.h create mode 100644 portable/GCC/ARM_CM55/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM55/secure/secure_context.c create mode 100644 portable/GCC/ARM_CM55/secure/secure_context.h create mode 100644 portable/GCC/ARM_CM55/secure/secure_context_port.c create mode 100644 portable/GCC/ARM_CM55/secure/secure_heap.c create mode 100644 portable/GCC/ARM_CM55/secure/secure_heap.h create mode 100644 portable/GCC/ARM_CM55/secure/secure_init.c create mode 100644 portable/GCC/ARM_CM55/secure/secure_init.h create mode 100644 
portable/GCC/ARM_CM55/secure/secure_port_macros.h create mode 100644 portable/GCC/ARM_CM55_NTZ/non_secure/port.c create mode 100644 portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c create mode 100644 portable/GCC/ARM_CM55_NTZ/non_secure/portasm.h create mode 100644 portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h create mode 100644 portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM23/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM33/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM55/non_secure/port.c create mode 100644 portable/IAR/ARM_CM55/non_secure/portasm.h create mode 100644 portable/IAR/ARM_CM55/non_secure/portasm.s create mode 100644 portable/IAR/ARM_CM55/non_secure/portmacro.h create mode 100644 portable/IAR/ARM_CM55/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM55/secure/secure_context.c create mode 100644 portable/IAR/ARM_CM55/secure/secure_context.h create mode 100644 portable/IAR/ARM_CM55/secure/secure_context_port_asm.s create mode 100644 portable/IAR/ARM_CM55/secure/secure_heap.c create mode 100644 portable/IAR/ARM_CM55/secure/secure_heap.h create mode 100644 portable/IAR/ARM_CM55/secure/secure_init.c create mode 100644 portable/IAR/ARM_CM55/secure/secure_init.h create mode 100644 portable/IAR/ARM_CM55/secure/secure_port_macros.h create mode 100644 portable/IAR/ARM_CM55_NTZ/non_secure/port.c create mode 100644 portable/IAR/ARM_CM55_NTZ/non_secure/portasm.h create mode 100644 portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s create mode 100644 portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h create mode 100644 portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 718b0b37ec4..85aa17a7fb9 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1168,6 +1168,7 @@ mutexes mux muxes mv +mve mvfaclo mvtacgu mvtachi diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 46325477bc8..82fc189b986 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1168,6 +1168,12 @@ #define configENABLE_FPU 1 #endif +/* Set configENABLE_MVE to 1 to enable MVE support and 0 to disable it. This is + * currently used in ARMv8M ports. */ +#ifndef configENABLE_MVE + #define configENABLE_MVE 0 +#endif + /* Set configENABLE_TRUSTZONE to 1 enable TrustZone support and 0 to disable it. * This is currently used in ARMv8M ports. */ #ifndef configENABLE_TRUSTZONE diff --git a/portable/ARMv8M/ReadMe.txt b/portable/ARMv8M/ReadMe.txt index 0bb046a8d53..47194bfd6c6 100644 --- a/portable/ARMv8M/ReadMe.txt +++ b/portable/ARMv8M/ReadMe.txt @@ -1,10 +1,11 @@ -This directory tree contains the master copy of the FreeeRTOS Cortex-M33 port. +This directory tree contains the master copy of the FreeeRTOS Armv8-M and +Armv8.1-M ports. Do not use the files located here! These file are copied into separate -FreeRTOS/Source/portable/[compiler]/ARM_CM33_NNN directories prior to each +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NNN directories prior to each FreeRTOS release. -If your Cortex-M33 application uses TrustZone then use the files from the -FreeRTOS/Source/portable/[compiler]/ARM_CM33 directories. +If your Armv8-M and Armv8.1-M application uses TrustZone then use the files from the +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55] directories. 
-If your Cortex-M33 application does not use TrustZone then use the files from -the FreeRTOS/Source/portable/[compiler]/ARM_CM33_NTZ directories. +If your Armv8-M and Armv8.1-M application does not use TrustZone then use the files from +the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NTZ directories. diff --git a/portable/ARMv8M/copy_files.py b/portable/ARMv8M/copy_files.py index ac5aa45b864..5c2cca7fa1b 100644 --- a/portable/ARMv8M/copy_files.py +++ b/portable/ARMv8M/copy_files.py @@ -33,51 +33,89 @@ _FREERTOS_PORTABLE_DIRECTORY_ = os.path.dirname(_THIS_FILE_DIRECTORY_) _COMPILERS_ = ['GCC', 'IAR'] -_ARCH_NS_ = ['ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] -_ARCH_S_ = ['ARM_CM33', 'ARM_CM23'] +_ARCH_NS_ = ['ARM_CM55', 'ARM_CM55_NTZ', 'ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] +_ARCH_S_ = ['ARM_CM55', 'ARM_CM33', 'ARM_CM23'] -_SUPPORTED_CONFIGS_ = { - 'GCC' : ['ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'], - 'IAR' : ['ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] - } - -# Files to be complied in the Secure Project -_SECURE_FILE_PATHS_ = [ +# Files to be compiled in the Secure Project +_SECURE_COMMON_FILE_PATHS_ = [ os.path.join('secure', 'context'), - os.path.join('secure', 'context', 'portable', '_COMPILER_ARCH_'), os.path.join('secure', 'heap'), os.path.join('secure', 'init'), os.path.join('secure', 'macros') ] -# Files to be complied in the Non-Secure Project -_NONSECURE_FILE_PATHS_ = [ - 'non_secure', - os.path.join('non_secure', 'portable', '_COMPILER_ARCH_') +_SECURE_PORTABLE_FILE_PATHS_ = { + 'GCC':{ + 'ARM_CM23':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM23')], + 'ARM_CM33':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')] + }, + 'IAR':{ + 'ARM_CM23':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM23')], + 'ARM_CM33':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')] + } +} + +# Files to be compiled in the Non-Secure Project +_NONSECURE_COMMON_FILE_PATHS_ = [ + 'non_secure' ] - -def is_supported_config(compiler, arch): - return arch in _SUPPORTED_CONFIGS_[compiler] +_NONSECURE_PORTABLE_FILE_PATHS_ = { + 'GCC':{ + 'ARM_CM23' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM23')], + 'ARM_CM23_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM23_NTZ')], + 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ')], + 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], + 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')] + }, + 'IAR':{ + 'ARM_CM23' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM23')], + 'ARM_CM23_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM23_NTZ')], + 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ')], + 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')], + 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 
'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')] + }, +} def copy_files_in_dir(src_abs_path, dst_abs_path): - for src_file in os.listdir(src_abs_path): - src_file_abs_path = os.path.join(src_abs_path, src_file) - if os.path.isfile(src_file_abs_path) and src_file != 'ReadMe.txt': - if not os.path.exists(dst_abs_path): - os.makedirs(dst_abs_path) - print('Copying {}...'.format(os.path.basename(src_file_abs_path))) - shutil.copy2(src_file_abs_path, dst_abs_path) + if os.path.isfile(src_abs_path): + print('Src: {}'.format(src_abs_path)) + print('Dst: {}\n'.format(dst_abs_path)) + shutil.copy2(src_abs_path, dst_abs_path) + else: + for src_file in os.listdir(src_abs_path): + src_file_abs_path = os.path.join(src_abs_path, src_file) + if os.path.isfile(src_file_abs_path) and src_file != 'ReadMe.txt': + if not os.path.exists(dst_abs_path): + os.makedirs(dst_abs_path) + print('Src: {}'.format(src_file_abs_path)) + print('Dst: {}\n'.format(dst_abs_path)) + shutil.copy2(src_file_abs_path, dst_abs_path) + + +def copy_common_files_for_compiler_and_arch(compiler, arch, src_paths, dst_path): + for src_path in src_paths: + src_abs_path = os.path.join(_THIS_FILE_DIRECTORY_, src_path) + dst_abs_path = os.path.join(_FREERTOS_PORTABLE_DIRECTORY_, compiler, arch, dst_path) -def copy_files_for_compiler_and_arch(compiler, arch, src_paths, dst_path): - _COMPILER_ARCH_ = os.path.join(compiler, arch) - for src_path in src_paths: - src_path_sanitized = src_path.replace('_COMPILER_ARCH_', _COMPILER_ARCH_ ) + copy_files_in_dir(src_abs_path, dst_abs_path) + + +def copy_portable_files_for_compiler_and_arch(compiler, arch, src_paths, dst_path): + for src_path in src_paths[compiler][arch]: - src_abs_path = os.path.join(_THIS_FILE_DIRECTORY_, src_path_sanitized) - dst_abs_path = os.path.join(_FREERTOS_PORTABLE_DIRECTORY_, _COMPILER_ARCH_, dst_path) + src_abs_path = os.path.join(_THIS_FILE_DIRECTORY_, src_path) + dst_abs_path = os.path.join(_FREERTOS_PORTABLE_DIRECTORY_, compiler, arch, dst_path) copy_files_in_dir(src_abs_path, dst_abs_path) @@ -86,14 +124,14 @@ def copy_files(): # Copy Secure Files for compiler in _COMPILERS_: for arch in _ARCH_S_: - if is_supported_config(compiler, arch): - copy_files_for_compiler_and_arch(compiler, arch, _SECURE_FILE_PATHS_, 'secure') + copy_common_files_for_compiler_and_arch(compiler, arch, _SECURE_COMMON_FILE_PATHS_, 'secure') + copy_portable_files_for_compiler_and_arch(compiler, arch, _SECURE_PORTABLE_FILE_PATHS_, 'secure') # Copy Non-Secure Files for compiler in _COMPILERS_: for arch in _ARCH_NS_: - if is_supported_config(compiler, arch): - copy_files_for_compiler_and_arch(compiler, arch, _NONSECURE_FILE_PATHS_, 'non_secure') + copy_common_files_for_compiler_and_arch(compiler, arch, _NONSECURE_COMMON_FILE_PATHS_, 'non_secure') + copy_portable_files_for_compiler_and_arch(compiler, arch, _NONSECURE_PORTABLE_FILE_PATHS_, 'non_secure') def main(): diff --git a/portable/ARMv8M/non_secure/ReadMe.txt b/portable/ARMv8M/non_secure/ReadMe.txt index 994d93754ab..777e8921828 100644 --- a/portable/ARMv8M/non_secure/ReadMe.txt +++ b/portable/ARMv8M/non_secure/ReadMe.txt @@ -1,11 +1,11 @@ -This directory tree contains the master copy of the FreeeRTOS Cortex-M33 port. +This directory tree contains the master copy of the FreeRTOS Armv8-M and +Armv8.1-M ports. Do not use the files located here! These file are copied into separate -FreeRTOS/Source/portable/[compiler]/ARM_CM33_NNN directories prior to each -FreeRTOS release. 
+FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NNN directories prior to +each FreeRTOS release. -If your Cortex-M33 application uses TrustZone then use the files from the -FreeRTOS/Source/portable/[compiler]/ARM_CM33 directories. - -If your Cortex-M33 application does not use TrustZone then use the files from -the FreeRTOS/Source/portable/[compiler]/ARM_CM33_NTZ directories. +If your Armv8-M/Armv8.1-M application uses TrustZone then use the files from the +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55] directories. +If your Armv8-M/Armv8.1-M application does not use TrustZone then use the files from +the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NTZ directories. diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index eacf22fd6f5..3486481afe9 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,277 +45,27 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M23" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __attribute__( ( used ) ) -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. 
- */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M23" +#define portDONT_DISCARD __attribute__( ( used ) ) /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. 
*/ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
- */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -/*-----------------------------------------------------------*/ - - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index eacf22fd6f5..3486481afe9 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,277 +45,27 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. 
- */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M23" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __attribute__( ( used ) ) -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M23" +#define portDONT_DISCARD __attribute__( ( used ) ) /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. 
*/ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ -/** - * @brief Barriers. 
- */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -/*-----------------------------------------------------------*/ - - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c index e539a5e1efd..16f7e2f2433 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c @@ -267,11 +267,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " save_ns_context: \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_FPU == 1 ) - " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" - " vstmdbeq r2!, {s16-s31} \n"/* Store the FPU registers which are not saved automatically. */ - #endif /* configENABLE_FPU */ + " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ " str r2, [r1] \n"/* Save the new top of stack in TCB. */ @@ -386,11 +386,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ - #if ( configENABLE_FPU == 1 ) - " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" - " vldmiaeq r2!, {s16-s31} \n"/* Restore the FPU registers which are not restored automatically. */ - #endif /* configENABLE_FPU */ + " vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ " msr psp, r2 \n"/* Remember the new top of stack for the task. */ " bx lr \n" " \n" diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index c560343b34e..766dfb0e754 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,273 +45,22 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. 
Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M33" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __attribute__( ( used ) ) -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - -/* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portARCH_NAME "Cortex-M33" +#define portDONT_DISCARD __attribute__( ( used ) ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. 
*/ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() ulSetInterruptMask() - #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ -/** - * @brief Barriers. 
- */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -/*-----------------------------------------------------------*/ - - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c index f0a9f3e4a01..f7e4aed53ba 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c @@ -226,11 +226,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ - #if ( configENABLE_FPU == 1 ) - " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" - " vstmdbeq r0!, {s16-s31} \n"/* Store the FPU registers which are not saved automatically. */ - #endif /* configENABLE_FPU */ + " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) " mrs r1, psplim \n"/* r1 = PSPLIM. */ " mrs r2, control \n"/* r2 = CONTROL. */ @@ -305,11 +305,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ #endif /* configENABLE_MPU */ " \n" - #if ( configENABLE_FPU == 1 ) - " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" - " vldmiaeq r0!, {s16-s31} \n"/* Restore the FPU registers which are not restored automatically. */ - #endif /* configENABLE_FPU */ + " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" #if ( configENABLE_MPU == 1 ) " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index c560343b34e..766dfb0e754 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,273 +45,22 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. 
- #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M33" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __attribute__( ( used ) ) -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - -/* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portARCH_NAME "Cortex-M33" +#define portDONT_DISCARD __attribute__( ( used ) ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. 
*/ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() ulSetInterruptMask() - #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ -/** - * @brief Barriers. 
- */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -/*-----------------------------------------------------------*/ - - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h new file mode 100644 index 00000000000..b654748e138 --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -0,0 +1,71 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M55" +#define portDONT_DISCARD __attribute__( ( used ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. 
+ */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h index fc70f3d47d4..0f7326c850b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,284 +45,34 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M23" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __root -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. 
- */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M23" +#define portDONT_DISCARD __root /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. 
*/ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
- */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. */ - #pragma diag_suppress=Be006 - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h index 22e152e6de6..0f7326c850b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,284 +45,34 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. 
- #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M23" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __root -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M23" +#define portDONT_DISCARD __root /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. 
*/ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. 
*/ - #pragma diag_suppress=Be006 - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s index f8fd04f9ff9..44cd8d0e19d 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s @@ -219,11 +219,11 @@ PendSV_Handler: save_ns_context: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ - #if ( configENABLE_FPU == 1 ) - tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ it eq - vstmdbeq r2!, {s16-s31} /* Store the FPU registers which are not saved automatically. */ - #endif /* configENABLE_FPU */ + vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ @@ -323,11 +323,11 @@ PendSV_Handler: restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ - #if ( configENABLE_FPU == 1 ) - tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ it eq - vldmiaeq r2!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */ - #endif /* configENABLE_FPU */ + vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr /*-----------------------------------------------------------*/ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h index 17aa4ed9fb6..5e5b20c5d21 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,284 +45,34 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. 
- #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M33" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __root -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M33" +#define portDONT_DISCARD __root /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. 
*/ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() ulSetInterruptMask() - #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. 
*/ - #pragma diag_suppress=Be006 - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s index 64c0295a82a..9e9970cd40f 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s @@ -171,11 +171,11 @@ vClearInterruptMask: PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ -#if ( configENABLE_FPU == 1 ) - tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ it eq - vstmdbeq r0!, {s16-s31} /* Store the FPU registers which are not saved automatically. */ -#endif /* configENABLE_FPU */ + vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) mrs r1, psplim /* r1 = PSPLIM. */ mrs r2, control /* r2 = CONTROL. */ @@ -235,11 +235,11 @@ PendSV_Handler: ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ #endif /* configENABLE_MPU */ -#if ( configENABLE_FPU == 1 ) - tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ it eq - vldmiaeq r0!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */ -#endif /* configENABLE_FPU */ + vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) msr psplim, r1 /* Restore the PSPLIM register value for the task. */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h index 17aa4ed9fb6..5e5b20c5d21 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,284 +45,34 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. 
- #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M33" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __root -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M33" +#define portDONT_DISCARD __root /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. 
*/ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() ulSetInterruptMask() - #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. 
*/ - #pragma diag_suppress=Be006 - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h new file mode 100644 index 00000000000..a3b510e282c --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h @@ -0,0 +1,83 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M55" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. 
*/ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/ARMv8M/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. 
*/ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. 
*/ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. 
+ * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/ARMv8M/secure/ReadMe.txt b/portable/ARMv8M/secure/ReadMe.txt index 0bb046a8d53..777e8921828 100644 --- a/portable/ARMv8M/secure/ReadMe.txt +++ b/portable/ARMv8M/secure/ReadMe.txt @@ -1,10 +1,11 @@ -This directory tree contains the master copy of the FreeeRTOS Cortex-M33 port. +This directory tree contains the master copy of the FreeRTOS Armv8-M and +Armv8.1-M ports. Do not use the files located here! These file are copied into separate -FreeRTOS/Source/portable/[compiler]/ARM_CM33_NNN directories prior to each -FreeRTOS release. +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NNN directories prior to +each FreeRTOS release. -If your Cortex-M33 application uses TrustZone then use the files from the -FreeRTOS/Source/portable/[compiler]/ARM_CM33 directories. +If your Armv8-M/Armv8.1-M application uses TrustZone then use the files from the +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55] directories. -If your Cortex-M33 application does not use TrustZone then use the files from -the FreeRTOS/Source/portable/[compiler]/ARM_CM33_NTZ directories. +If your Armv8-M/Armv8.1-M application does not use TrustZone then use the files from +the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NTZ directories. diff --git a/portable/ARMv8M/secure/context/portable/GCC/ARM_CM33/secure_context_port.c b/portable/ARMv8M/secure/context/portable/GCC/ARM_CM33/secure_context_port.c index a6bf54cae14..ebf02077c7a 100644 --- a/portable/ARMv8M/secure/context/portable/GCC/ARM_CM33/secure_context_port.c +++ b/portable/ARMv8M/secure/context/portable/GCC/ARM_CM33/secure_context_port.c @@ -73,10 +73,10 @@ void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) " cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ " mrs r1, psp \n" /* r1 = PSP. */ " \n" - #if ( configENABLE_FPU == 1 ) - " vstmdb r1!, {s0} \n" /* Trigger the defferred stacking of FPU registers. */ - " vldmia r1!, {s0} \n" /* Nullify the effect of the pervious statement. 
*/ - #endif /* configENABLE_FPU */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */ + " vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" #if ( configENABLE_MPU == 1 ) " mrs r2, control \n" /* r2 = CONTROL. */ diff --git a/portable/ARMv8M/secure/context/portable/IAR/ARM_CM33/secure_context_port_asm.s b/portable/ARMv8M/secure/context/portable/IAR/ARM_CM33/secure_context_port_asm.s index 52dbe456342..99240ca08c4 100644 --- a/portable/ARMv8M/secure/context/portable/IAR/ARM_CM33/secure_context_port_asm.s +++ b/portable/ARMv8M/secure/context/portable/IAR/ARM_CM33/secure_context_port_asm.s @@ -64,10 +64,10 @@ SecureContext_SaveContextAsm: cbz r1, save_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ mrs r1, psp /* r1 = PSP. */ -#if ( configENABLE_FPU == 1 ) - vstmdb r1!, {s0} /* Trigger the defferred stacking of FPU registers. */ - vldmia r1!, {s0} /* Nullify the effect of the pervious statement. */ -#endif /* configENABLE_FPU */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */ + vldmia r1!, {s0} /* Nullify the effect of the previous statement. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) mrs r2, control /* r2 = CONTROL. */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index eacf22fd6f5..3486481afe9 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,277 +45,27 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. 
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M23" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __attribute__( ( used ) ) -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M23" +#define portDONT_DISCARD __attribute__( ( used ) ) /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. 
*/ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. 
Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -/*-----------------------------------------------------------*/ - - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. 
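The MPU constants that follow are easier to read with the MAIR packing in mind: MPU_MAIR0 holds four 8-bit memory attributes, one per byte, and each region's RLAR then selects one of them by index. A minimal sketch using the attribute macros defined just below; the attribute choices and the variable name are illustrative only:

    /* Illustrative packing of two attributes into MAIR0: index 0 is normal
     * write-back memory, index 1 is Device-nGnRE. */
    uint32_t ulExampleMAIR0 = ( ( uint32_t ) portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << 0UL ) |
                              ( ( uint32_t ) portMPU_DEVICE_MEMORY_nGnRE << 8UL );
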
+ */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. 
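The yield macros defined just below are normally used from an interrupt handler with the deferred-switch pattern; a minimal sketch, in which the handler name and the semaphore handle are assumptions made for the example:

    #include "FreeRTOS.h"
    #include "semphr.h"

    extern SemaphoreHandle_t xExampleSemaphore;   /* Assumed to be created elsewhere. */

    void vExampleIRQHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Unblock a task that is waiting on the semaphore. */
        xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

        /* Only request a context switch on exit if a higher priority task was woken. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
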
+ */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. 
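Putting the TrustZone macros above together, a task that calls secure functions allocates its secure context once, before its first secure call; a minimal sketch assuming the usual FreeRTOS includes, in which the task function, the 256-byte secure stack size and the secure function name are all assumptions:

    void vExampleSecureCallingTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* Allocate this task's secure-side context before any secure call is made. */
        portALLOCATE_SECURE_CONTEXT( 256 );

        for( ;; )
        {
            vExampleSecureFunction();   /* Hypothetical function exported by the secure image. */
        }
    }
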
+ */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index eacf22fd6f5..3486481afe9 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,277 +45,27 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M23" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __attribute__( ( used ) ) -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. 
- */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M23" +#define portDONT_DISCARD __attribute__( ( used ) ) /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. 
*/ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
- */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -/*-----------------------------------------------------------*/ - - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. 
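One practical consequence of the privilege bit defined just below: on MPU-enabled builds a task can be created to run privileged by OR-ing portPRIVILEGE_BIT into the priority passed to xTaskCreate(). A rough sketch, with the task function, name and priority chosen only for illustration:

    xTaskCreate( vExamplePrivilegedTask,                            /* Hypothetical task function. */
                 "Priv",
                 configMINIMAL_STACK_SIZE,
                 NULL,
                 ( tskIDLE_PRIORITY + 1 ) | portPRIVILEGE_BIT,      /* Request privileged mode. */
                 NULL );
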
+ */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. 
+ */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. 
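The critical section macros above come in a task-level pair and an ISR-level pair, and the two are not interchangeable; a minimal sketch of both, assuming the usual FreeRTOS includes and with the variable and function names invented for the example:

    static volatile uint32_t ulExampleSharedCount = 0;

    void vExampleTaskLevelAccess( void )
    {
        portENTER_CRITICAL();               /* Nestable, task level only. */
        ulExampleSharedCount++;
        portEXIT_CRITICAL();
    }

    void vExampleIsrLevelAccess( void )
    {
        uint32_t ulSavedMask = portSET_INTERRUPT_MASK_FROM_ISR();   /* Save the previous mask. */
        ulExampleSharedCount++;
        portCLEAR_INTERRUPT_MASK_FROM_ISR( ulSavedMask );           /* Restore it on the way out. */
    }
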
+ */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM33/non_secure/portasm.c b/portable/GCC/ARM_CM33/non_secure/portasm.c index e539a5e1efd..16f7e2f2433 100644 --- a/portable/GCC/ARM_CM33/non_secure/portasm.c +++ b/portable/GCC/ARM_CM33/non_secure/portasm.c @@ -267,11 +267,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " save_ns_context: \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_FPU == 1 ) - " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" - " vstmdbeq r2!, {s16-s31} \n"/* Store the FPU registers which are not saved automatically. */ - #endif /* configENABLE_FPU */ + " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ " str r2, [r1] \n"/* Save the new top of stack in TCB. */ @@ -386,11 +386,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ - #if ( configENABLE_FPU == 1 ) - " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" - " vldmiaeq r2!, {s16-s31} \n"/* Restore the FPU registers which are not restored automatically. */ - #endif /* configENABLE_FPU */ + " vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ " msr psp, r2 \n"/* Remember the new top of stack for the task. */ " bx lr \n" " \n" diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index c560343b34e..766dfb0e754 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,273 +45,22 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. 
- #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M33" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __attribute__( ( used ) ) -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - -/* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portARCH_NAME "Cortex-M33" +#define portDONT_DISCARD __attribute__( ( used ) ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. 
*/ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() ulSetInterruptMask() - #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ -/** - * @brief Barriers. 
- */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -/*-----------------------------------------------------------*/ - - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. 
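Because of the three #error checks above, a FreeRTOSConfig.h used with these ports must state explicitly whether the FPU, the MPU and TrustZone are in use; an illustrative fragment in which the chosen values are only an example, not a recommendation:

    /* Example FreeRTOSConfig.h fragment for an ARMv8-M port. */
    #define configENABLE_FPU          1
    #define configENABLE_MPU          0
    #define configENABLE_TRUSTZONE    1
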
+ */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM33/secure/secure_context_port.c b/portable/GCC/ARM_CM33/secure/secure_context_port.c index a6bf54cae14..ebf02077c7a 100644 --- a/portable/GCC/ARM_CM33/secure/secure_context_port.c +++ b/portable/GCC/ARM_CM33/secure/secure_context_port.c @@ -73,10 +73,10 @@ void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) " cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ " mrs r1, psp \n" /* r1 = PSP. */ " \n" - #if ( configENABLE_FPU == 1 ) - " vstmdb r1!, {s0} \n" /* Trigger the defferred stacking of FPU registers. */ - " vldmia r1!, {s0} \n" /* Nullify the effect of the pervious statement. */ - #endif /* configENABLE_FPU */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */ + " vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" #if ( configENABLE_MPU == 1 ) " mrs r2, control \n" /* r2 = CONTROL. 
*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c index f0a9f3e4a01..f7e4aed53ba 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c @@ -226,11 +226,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ - #if ( configENABLE_FPU == 1 ) - " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" - " vstmdbeq r0!, {s16-s31} \n"/* Store the FPU registers which are not saved automatically. */ - #endif /* configENABLE_FPU */ + " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) " mrs r1, psplim \n"/* r1 = PSPLIM. */ " mrs r2, control \n"/* r2 = CONTROL. */ @@ -305,11 +305,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ #endif /* configENABLE_MPU */ " \n" - #if ( configENABLE_FPU == 1 ) - " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" - " vldmiaeq r0!, {s16-s31} \n"/* Restore the FPU registers which are not restored automatically. */ - #endif /* configENABLE_FPU */ + " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" #if ( configENABLE_MPU == 1 ) " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index c560343b34e..766dfb0e754 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,273 +45,22 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. 
- */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M33" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __attribute__( ( used ) ) -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. 
- * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portARCH_NAME "Cortex-M33" +#define portDONT_DISCARD __attribute__( ( used ) ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() ulSetInterruptMask() - #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. 
- */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -/*-----------------------------------------------------------*/ - - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. 
+ */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. 
*/ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. 
+ * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c new file mode 100644 index 00000000000..d746923dfee --- /dev/null +++ b/portable/GCC/ARM_CM55/non_secure/port.c @@ -0,0 +1,1203 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. 
 */
+    #include "secure_context.h"
+    #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ *    configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+    #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG             ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG             ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG    ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG                    ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT           ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT              ( 1UL << 1UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT       ( 1UL << 16UL )
+#define portMIN_INTERRUPT_PRIORITY            ( 255UL )
+#define portNVIC_PENDSV_PRI                   ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI                  ( portMIN_INTERRUPT_PRIORITY << 24UL )
+#ifndef configSYSTICK_CLOCK_HZ
+    #define configSYSTICK_CLOCK_HZ    configCPU_CLOCK_HZ
+    /* Ensure the SysTick is clocked at the same frequency as the core. */
+    #define portNVIC_SYSTICK_CLK_BIT    ( 1UL << 2UL )
+#else
+
+/* The way the SysTick is clocked is not modified in case it is not the
+ * same as the core. */
+    #define portNVIC_SYSTICK_CLK_BIT    ( 0 )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG    ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT          ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR               ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE    ( 3UL )
+#define portCPACR_CP11_VALUE    portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS      ( 20UL )
+#define portCPACR_CP11_POS      ( 22UL )
+
+#define portFPCCR               ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS     ( 31UL )
+#define portFPCCR_ASPEN_MASK    ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS     ( 30UL )
+#define portFPCCR_LSPEN_MASK    ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. 
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. 
+ */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for is + * accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code will execute part way + * through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be un-suspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + * this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + * periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + * above. 
*/ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation + * contains its own wait for interrupt or wait for event + * instruction, and so wfi should not be executed again. However, + * the original expected idle time variable must remain unmodified, + * so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will + * increase any slippage between the time maintained by the RTOS and + * calendar time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. + * Again, the time the SysTick is stopped for is accounted for as + * best it can be, but using the tickless mode will inevitably + * result in some tiny drift of the time maintained by the kernel + * with respect to calendar time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + * been set back to the current reload value (the reload back being + * correct for the entire expected idle time) or if the SysTick is + * yet to count to zero (in which case an interrupt other than the + * SysTick must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + * reloaded with ulReloadValue. Reset the + * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + * period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is + * stepped forward by one less than the time spent waiting. 
*/ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + * Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + * value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. 
*/ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. 
*/ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
*/ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
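+             * ucSVCNumber was read from the immediate byte of the two-byte
+             * Thumb SVC instruction that sits just before the stacked return
+             * address, so reaching this case means the application issued an
+             * SVC number this port does not handle.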
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
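+                                                                 * pxPortInitialiseStack() only records the value in the frame;
+                                                                 * the context restore code later moves it into the CONTROL
+                                                                 * register, which is what actually makes the task run
+                                                                 * privileged or unprivileged.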
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
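+         * As an illustration (the addresses are hypothetical, not taken from
+         * any particular device): a 4 KB stack placed at 0x20010000 outside
+         * the privileged SRAM would be encoded by the code below as
+         *     RBAR = 0x20010000 | portMPU_REGION_NON_SHAREABLE |
+         *            portMPU_REGION_READ_WRITE | portMPU_REGION_EXECUTE_NEVER
+         *     RLAR = ( 0x20010FFF & portMPU_RLAR_ADDRESS_MASK ) |
+         *            portMPU_RLAR_ATTR_INDEX0 | portMPU_RLAR_REGION_ENABLE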
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
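+                 * Writing zero clears the region enable bit in RLAR, so an
+                 * unused region grants no access at all.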
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/non_secure/portasm.c b/portable/GCC/ARM_CM55/non_secure/portasm.c new file mode 100644 index 00000000000..3f810056b77 --- /dev/null +++ b/portable/GCC/ARM_CM55/non_secure/portasm.c @@ -0,0 +1,452 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Portasm includes. */ +#include "portasm.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. 
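+                                           * Bit 0 of MPU_CTRL is the MPU enable bit; it must be clear
+                                           * while MAIR0 and the region registers are reprogrammed below,
+                                           * and it is set again once they have been written.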
*/ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ + " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r4, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #4 \n"/* r4 = 4. */ + " str r4, [r2] \n"/* Program RNR = 4. */ + " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #8 \n"/* r4 = 8. */ + " str r4, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #12 \n"/* r4 = 12. */ + " str r4, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ + " ldr r5, xSecureContextConst2 \n" + " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " msr control, r3 \n"/* Set this task's CONTROL value. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r4 \n"/* Finally, branch to EXC_RETURN. */ + #else /* configENABLE_MPU */ + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n"/* Finally, branch to EXC_RETURN. 
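+                                           * Branching to the EXC_RETURN value performs an exception return
+                                           * from the SVC that started the scheduler, unstacking the frame
+                                           * built by pxPortInitialiseStack() and dropping straight into the
+                                           * first task.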
*/ + #endif /* configENABLE_MPU */ + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + "xSecureContextConst2: .word xSecureContext \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst2: .word 0xe000ed94 \n" + "xMAIR0Const2: .word 0xe000edc0 \n" + "xRNRConst2: .word 0xe000ed98 \n" + "xRBARConst2: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " mrs r0, control \n"/* r0 = CONTROL. */ + " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + " ite ne \n" + " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + " bx lr \n"/* Return. */ + " \n" + " .align 4 \n" + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " mrs r0, control \n"/* Read the CONTROL register. */ + " bic r0, #1 \n"/* Clear the bit 0. */ + " msr control, r0 \n"/* Write back the new CONTROL value. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vResetPrivilege( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " mrs r0, control \n"/* r0 = CONTROL. */ + " orr r0, #1 \n"/* r0 = r0 | 1. */ + " msr control, r0 \n"/* CONTROL = r0. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ + " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ + " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */ + " cpsie i \n"/* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc %0 \n"/* System call to start the first task. */ + " nop \n" + " \n" + " .align 4 \n" + "xVTORConst: .word 0xe000ed08 \n" + ::"i" ( portSVC_START_SCHEDULER ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ + " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " msr basepri, r0 \n"/* basepri = ulMask. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. 
*/ + ::: "memory" + ); +} +/*-----------------------------------------------------------*/ + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " mrs r2, psp \n"/* Read PSP in r2. */ + " \n" + " cbz r0, save_ns_context \n"/* No secure context to save. */ + " push {r0-r2, r14} \n" + " bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r3} \n"/* LR is now in r3. */ + " mov lr, r3 \n"/* LR = r3. */ + " lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ + #if ( configENABLE_MPU == 1 ) + " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r3, control \n"/* r3 = CONTROL. */ + " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + " b select_next_task \n" + " \n" + " save_ns_context: \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r3, control \n"/* r3 = CONTROL. */ + " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ + " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ + " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. 
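+                                           * The 48 bytes reserved above break down as these four words
+                                           * (xSecureContext, PSPLIM, CONTROL and LR) plus the eight words
+                                           * for r4-r11 stored earlier; r0-r3, r12, lr, pc and xPSR were
+                                           * already stacked by hardware on exception entry.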
*/ + #else /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n"/* r0 = 0. */ + " msr basepri, r0 \n"/* Enable interrupts. */ + " \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r3] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ + " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ + " str r4, [r3] \n"/* Program MAIR0. */ + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #4 \n"/* r4 = 4. */ + " str r4, [r3] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #8 \n"/* r4 = 8. */ + " str r4, [r3] \n"/* Program RNR = 8. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #12 \n"/* r4 = 12. */ + " str r4, [r3] \n"/* Program RNR = 12. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r3] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. 
*/ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + #else /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + #endif /* configENABLE_MPU */ + " \n" + " restore_ns_context: \n" + " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. 
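+                                           * PSP now points at the hardware-stacked frame of the task being
+                                           * resumed; the exception return triggered by the bx lr below
+                                           * unstacks that frame automatically.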
*/ + " bx lr \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + "xSecureContextConst: .word xSecureContext \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst: .word 0xe000ed94 \n" + "xMAIR0Const: .word 0xe000edc0 \n" + "xRNRConst: .word 0xe000ed98 \n" + "xRBARConst: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} +/*-----------------------------------------------------------*/ + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " tst lr, #4 \n" + " ite eq \n" + " mrseq r0, msp \n" + " mrsne r0, psp \n" + " ldr r1, svchandler_address_const \n" + " bx r1 \n" + " \n" + " .align 4 \n" + "svchandler_address_const: .word vPortSVCHandler_C \n" + ); +} +/*-----------------------------------------------------------*/ + +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " svc %0 \n"/* Secure context is allocated in the supervisor call. */ + " bx lr \n"/* Return. */ + ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ + " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */ + " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */ + " it ne \n" + " svcne %0 \n"/* Secure context is freed in the supervisor call. */ + " bx lr \n"/* Return. */ + ::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/non_secure/portasm.h b/portable/GCC/ARM_CM55/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/GCC/ARM_CM55/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. 
*/ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h new file mode 100644 index 00000000000..b654748e138 --- /dev/null +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -0,0 +1,71 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M55" +#define portDONT_DISCARD __attribute__( ( used ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. 
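+ * The RBAR encodings below combine a 32-byte aligned base address with the
+ * shareability, access permission and execute-never bits; the matching RLAR
+ * values combine the region limit address with a MAIR attribute index and
+ * the region enable bit, following the Armv8-M MPU register layout.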
+ */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. 
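+ * portYIELD() requests a context switch by pending the PendSV exception
+ * through the interrupt control register; the switch itself is then
+ * performed in PendSV_Handler().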
+ */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. 
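+ * portMEMORY_BARRIER() is a compiler barrier only: the empty asm statement
+ * with a "memory" clobber stops the compiler reordering memory accesses
+ * across it, but no DMB/DSB instruction is emitted.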
+ */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM55/secure/secure_context.c b/portable/GCC/ARM_CM55/secure/secure_context.c new file mode 100644 index 00000000000..0730d574dd0 --- /dev/null +++ b/portable/GCC/ARM_CM55/secure/secure_context.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief CONTROL value for privileged tasks. + * + * Bit[0] - 0 --> Thread mode is privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_PRIVILEGED 0x02 + +/** + * @brief CONTROL value for un-privileged tasks. + * + * Bit[0] - 1 --> Thread mode is un-privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03 + +/** + * @brief Size of stack seal values in bytes. + */ +#define securecontextSTACK_SEAL_SIZE 8 + +/** + * @brief Stack seal value as recommended by ARM. + */ +#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5 + +/** + * @brief Maximum number of secure contexts. + */ +#ifndef secureconfigMAX_SECURE_CONTEXTS + #define secureconfigMAX_SECURE_CONTEXTS 8UL +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Pre-allocated array of secure contexts. + */ +SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ]; +/*-----------------------------------------------------------*/ + +/** + * @brief Get a free secure context for a task from the secure context pool (xSecureContexts). + * + * This function ensures that only one secure context is allocated for a task. + * + * @param[in] pvTaskHandle The task handle for which the secure context is allocated. + * + * @return Index of a free secure context in the xSecureContexts array. + */ +static uint32_t ulGetSecureContext( void * pvTaskHandle ); + +/** + * @brief Return the secure context to the secure context pool (xSecureContexts). 
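+ *
+ * A slot is free again once all four members are NULL, which is exactly the
+ * condition ulGetSecureContext() checks; for example, after the context at
+ * index 2 is returned, xSecureContexts[ 2 ] can be handed out by the next
+ * call to SecureContext_AllocateContext().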
+ * + * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array. + */ +static void vReturnSecureContext( uint32_t ulSecureContextIndex ); + +/* These are implemented in assembly. */ +extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ); +extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ); +/*-----------------------------------------------------------*/ + +static uint32_t ulGetSecureContext( void * pvTaskHandle ) +{ + /* Start with invalid index. */ + uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) && + ( xSecureContexts[ i ].pucStackLimit == NULL ) && + ( xSecureContexts[ i ].pucStackStart == NULL ) && + ( xSecureContexts[ i ].pvTaskHandle == NULL ) && + ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = i; + } + else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle ) + { + /* A task can only have one secure context. Do not allocate a second + * context for the same task. */ + ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + break; + } + } + + return ulSecureContextIndex; +} +/*-----------------------------------------------------------*/ + +static void vReturnSecureContext( uint32_t ulSecureContextIndex ) +{ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL; + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_Init( void ) +{ + uint32_t ulIPSR, i; + static uint32_t ulSecureContextsInitialized = 0; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) ) + { + /* Ensure to initialize secure contexts only once. */ + ulSecureContextsInitialized = 1; + + /* No stack for thread mode until a task's context is loaded. */ + secureportSET_PSPLIM( securecontextNO_STACK ); + secureportSET_PSP( securecontextNO_STACK ); + + /* Initialize all secure contexts. */ + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + xSecureContexts[ i ].pucCurrentStackPointer = NULL; + xSecureContexts[ i ].pucStackLimit = NULL; + xSecureContexts[ i ].pucStackStart = NULL; + xSecureContexts[ i ].pvTaskHandle = NULL; + } + + #if ( configENABLE_MPU == 1 ) + { + /* Configure thread mode to use PSP and to be unprivileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED ); + } + #else /* configENABLE_MPU */ + { + /* Configure thread mode to use PSP and to be privileged. 
*/
+            secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
+        }
+        #endif /* configENABLE_MPU */
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+                                                                                       uint32_t ulIsTaskPrivileged,
+                                                                                       void * pvTaskHandle )
+#else /* configENABLE_MPU */
+    secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+                                                                                       void * pvTaskHandle )
+#endif /* configENABLE_MPU */
+{
+    uint8_t * pucStackMemory = NULL;
+    uint8_t * pucStackLimit;
+    uint32_t ulIPSR, ulSecureContextIndex;
+    SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID;
+
+    #if ( configENABLE_MPU == 1 )
+        uint32_t * pulCurrentStackPointer = NULL;
+    #endif /* configENABLE_MPU */
+
+    /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit
+     * Register (PSPLIM) value. */
+    secureportREAD_IPSR( ulIPSR );
+    secureportREAD_PSPLIM( pucStackLimit );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode.
+     * Also do nothing if a secure context is already loaded. PSPLIM is set to
+     * securecontextNO_STACK when no secure context is loaded. */
+    if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) )
+    {
+        /* Obtain a free secure context. */
+        ulSecureContextIndex = ulGetSecureContext( pvTaskHandle );
+
+        /* Were we able to get a free context? */
+        if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS )
+        {
+            /* Allocate the stack space. */
+            pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE );
+
+            if( pucStackMemory != NULL )
+            {
+                /* Since stack grows down, the starting point will be the last
+                 * location. Note that this location is next to the last
+                 * allocated byte for stack (excluding the space for seal values)
+                 * because the hardware decrements the stack pointer before
+                 * writing i.e. if stack pointer is 0x2, a push operation will
+                 * decrement the stack pointer to 0x1 and then write at 0x1. */
+                xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize;
+
+                /* Seal the created secure process stack. */
+                *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE;
+                *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE;
+
+                /* The stack cannot go beyond this location. This value is
+                 * programmed in the PSPLIM register on context switch.*/
+                xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory;
+
+                xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle;
+
+                #if ( configENABLE_MPU == 1 )
+                {
+                    /* Store the correct CONTROL value for the task on the stack.
+                     * This value is programmed in the CONTROL register on
+                     * context switch. */
+                    pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+                    pulCurrentStackPointer--;
+
+                    if( ulIsTaskPrivileged )
+                    {
+                        *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
+                    }
+                    else
+                    {
+                        *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
+                    }
+
+                    /* Store the current stack pointer. This value is programmed in
+                     * the PSP register on context switch.
*/ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer; + } + #else /* configENABLE_MPU */ + { + /* Current SP is set to the starting of the stack. This + * value programmed in the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart; + } + #endif /* configENABLE_MPU */ + + /* Ensure to never return 0 as a valid context handle. */ + xSecureContextHandle = ulSecureContextIndex + 1UL; + } + } + } + + return xSecureContextHandle; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint32_t ulIPSR, ulSecureContextIndex; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* Only free if a valid context handle is passed. */ + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + /* Ensure that the secure context being deleted is associated with + * the task. */ + if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) + { + /* Free the stack space. */ + vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit ); + + /* Return the secure context back to the free secure contexts pool. */ + vReturnSecureContext( ulSecureContextIndex ); + } + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that no secure context is loaded and the task is loading it's + * own context. */ + if( ( pucStackLimit == securecontextNO_STACK ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that task's context is loaded and the task is saving it's own + * context. 
*/ + if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/secure/secure_context.h b/portable/GCC/ARM_CM55/secure/secure_context.h new file mode 100644 index 00000000000..d0adbaf018f --- /dev/null +++ b/portable/GCC/ARM_CM55/secure/secure_context.h @@ -0,0 +1,135 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_CONTEXT_H__ +#define __SECURE_CONTEXT_H__ + +/* Standard includes. */ +#include + +/* FreeRTOS includes. */ +#include "FreeRTOSConfig.h" + +/** + * @brief PSP value when no secure context is loaded. + */ +#define securecontextNO_STACK 0x0 + +/** + * @brief Invalid context ID. + */ +#define securecontextINVALID_CONTEXT_ID 0UL +/*-----------------------------------------------------------*/ + +/** + * @brief Structure to represent a secure context. + * + * @note Since stack grows down, pucStackStart is the highest address while + * pucStackLimit is the first address of the allocated memory. + */ +typedef struct SecureContext +{ + uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */ + uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */ + uint8_t * pucStackStart; /**< First location of the stack memory. */ + void * pvTaskHandle; /**< Task handle of the task this context is associated with. */ +} SecureContext_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Opaque handle for a secure context. + */ +typedef uint32_t SecureContextHandle_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Initializes the secure context management system. + * + * PSP is set to NULL and therefore a task must allocate and load a context + * before calling any secure side function in the thread mode. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureContext_Init( void ); + +/** + * @brief Allocates a context on the secure side. 
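+ *
+ * Illustrative usage from the non-secure side (the stack size shown is only
+ * an example value): a task that intends to call secure functions first
+ * executes
+ *
+ *     portALLOCATE_SECURE_CONTEXT( 256 );
+ *
+ * which raises an SVC that runs this function in the handler mode.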
+ * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] ulSecureStackSize Size of the stack to allocate on secure side. + * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise. + * + * @return Opaque context handle if context is successfully allocated, NULL + * otherwise. + */ +#if ( configENABLE_MPU == 1 ) + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ); +#else /* configENABLE_MPU */ + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ); +#endif /* configENABLE_MPU */ + +/** + * @brief Frees the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the + * context to be freed. + */ +void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Loads the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be loaded. + */ +void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Saves the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be saved. + */ +void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +#endif /* __SECURE_CONTEXT_H__ */ diff --git a/portable/GCC/ARM_CM55/secure/secure_context_port.c b/portable/GCC/ARM_CM55/secure/secure_context_port.c new file mode 100644 index 00000000000..13520870bca --- /dev/null +++ b/portable/GCC/ARM_CM55/secure/secure_context_port.c @@ -0,0 +1,97 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure port macros. 
*/ +#include "secure_port_macros.h" + +void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) ); +void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) ); + +void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) +{ + /* pxSecureContext value is in r0. */ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r1, ipsr \n" /* r1 = IPSR. */ + " cbz r1, load_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ + " ldmia r0!, {r1, r2} \n" /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r1!, {r3} \n" /* Read CONTROL register value from task's stack. r3 = CONTROL. */ + " msr control, r3 \n" /* CONTROL = r3. */ + #endif /* configENABLE_MPU */ + " \n" + " msr psplim, r2 \n" /* PSPLIM = r2. */ + " msr psp, r1 \n" /* PSP = r1. */ + " \n" + " load_ctx_therad_mode: \n" + " bx lr \n" + " \n" + ::: "r0", "r1", "r2" + ); +} +/*-----------------------------------------------------------*/ + +void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) +{ + /* pxSecureContext value is in r0. */ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r1, ipsr \n" /* r1 = IPSR. */ + " cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ + " mrs r1, psp \n" /* r1 = PSP. */ + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */ + " vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + #if ( configENABLE_MPU == 1 ) + " mrs r2, control \n" /* r2 = CONTROL. */ + " stmdb r1!, {r2} \n" /* Store CONTROL value on the stack. */ + #endif /* configENABLE_MPU */ + " \n" + " str r1, [r0] \n" /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */ + " movs r1, %0 \n" /* r1 = securecontextNO_STACK. */ + " msr psplim, r1 \n" /* PSPLIM = securecontextNO_STACK. */ + " msr psp, r1 \n" /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */ + " \n" + " save_ctx_therad_mode: \n" + " bx lr \n" + " \n" + ::"i" ( securecontextNO_STACK ) : "r1", "memory" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/secure/secure_heap.c b/portable/GCC/ARM_CM55/secure/secure_heap.c new file mode 100644 index 00000000000..c633e2d0596 --- /dev/null +++ b/portable/GCC/ARM_CM55/secure/secure_heap.c @@ -0,0 +1,451 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure context heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Total heap size. + */ +#ifndef secureconfigTOTAL_HEAP_SIZE + #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) ) +#endif + +/* No test marker by default. */ +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +/* No tracing by default. */ +#ifndef traceMALLOC + #define traceMALLOC( pvReturn, xWantedSize ) +#endif + +/* No tracing by default. */ +#ifndef traceFREE + #define traceFREE( pv, xBlockSize ) +#endif + +/* Block sizes must not get too small. */ +#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define secureheapBITS_PER_BYTE ( ( size_t ) 8 ) +/*-----------------------------------------------------------*/ + +/* Allocate the memory for the heap. */ +#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) + +/* The application writer has already defined the array used for the RTOS +* heap - probably so it can be placed in a special segment or address. */ + extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#else /* configAPPLICATION_ALLOCATED_HEAP */ + static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/** + * @brief The linked list structure. + * + * This is used to link free blocks in order of their memory address. + */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ +} BlockLink_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Called automatically to setup the required heap structures the first + * time pvPortMalloc() is called. + */ +static void prvHeapInit( void ); + +/** + * @brief Inserts a block of memory that is being freed into the correct + * position in the list of free memory blocks. + * + * The block being freed will be merged with the block in front it and/or the + * block behind it if the memory blocks are adjacent to each other. + * + * @param[in] pxBlockToInsert The block being freed. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ); +/*-----------------------------------------------------------*/ + +/** + * @brief The size of the structure placed at the beginning of each allocated + * memory block must by correctly byte aligned. + */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + +/** + * @brief Create a couple of list links to mark the start and end of the list. + */ +static BlockLink_t xStart, * pxEnd = NULL; + +/** + * @brief Keeps track of the number of free bytes remaining, but says nothing + * about fragmentation. 
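+ *
+ * For example, xPortGetFreeHeapSize() may report 2 KB free while a
+ * pvPortMalloc( 1024 ) still fails, because the free space can be split
+ * across non-adjacent blocks that are each smaller than the request.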
+ */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/** + * @brief Gets set to the top bit of an size_t type. + * + * When this bit in the xBlockSize member of an BlockLink_t structure is set + * then the block belongs to the application. When the bit is free the block is + * still part of the free heap space. + */ +static size_t xBlockAllocatedBit = 0; +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ + BlockLink_t * pxFirstFreeBlock; + uint8_t * pucAlignedHeap; + size_t uxAddress; + size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( secureportBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + * blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + * at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + * entire heap space, minus the space taken by pxEnd. */ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) +{ + BlockLink_t * pxIterator; + uint8_t * puc; + + /* Iterate through the list until a block is found that has a higher address + * than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. 
*/ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + * before and the block after, then it's pxNextFreeBlock pointer will have + * already been set, and should not be set here as that would make it point + * to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +void * pvPortMalloc( size_t xWantedSize ) +{ + BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + void * pvReturn = NULL; + + /* If this is the first call to malloc then the heap will require + * initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is set. + * The top bit of the block size member of the BlockLink_t structure is used + * to determine who owns the block - the application or the kernel, so it + * must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + * structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number of + * bytes. */ + if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) ); + secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + * one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size was + * not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + * BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + * of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + * two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + * block following the number of bytes requested. The void + * cast is used to prevent byte alignment warnings from the + * compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the single + * block. 
*/ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned by + * the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + + #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */ + + secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void * pv ) +{ + uint8_t * puc = ( uint8_t * ) pv; + BlockLink_t * pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + * before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + secureportASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + * allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + secureportDISABLE_NON_SECURE_INTERRUPTS(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + secureportENABLE_NON_SECURE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/secure/secure_heap.h b/portable/GCC/ARM_CM55/secure/secure_heap.h new file mode 100644 index 00000000000..c13590f86ad --- /dev/null +++ b/portable/GCC/ARM_CM55/secure/secure_heap.h @@ -0,0 +1,66 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_HEAP_H__ +#define __SECURE_HEAP_H__ + +/* Standard includes. */ +#include + +/** + * @brief Allocates memory from heap. + * + * @param[in] xWantedSize The size of the memory to be allocated. + * + * @return Pointer to the memory region if the allocation is successful, NULL + * otherwise. + */ +void * pvPortMalloc( size_t xWantedSize ); + +/** + * @brief Frees the previously allocated memory. + * + * @param[in] pv Pointer to the memory to be freed. + */ +void vPortFree( void * pv ); + +/** + * @brief Get the free heap size. + * + * @return Free heap size. + */ +size_t xPortGetFreeHeapSize( void ); + +/** + * @brief Get the minimum ever free heap size. + * + * @return Minimum ever free heap size. + */ +size_t xPortGetMinimumEverFreeHeapSize( void ); + +#endif /* __SECURE_HEAP_H__ */ diff --git a/portable/GCC/ARM_CM55/secure/secure_init.c b/portable/GCC/ARM_CM55/secure/secure_init.c new file mode 100644 index 00000000000..dc19ebc7d5e --- /dev/null +++ b/portable/GCC/ARM_CM55/secure/secure_init.c @@ -0,0 +1,106 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure init includes. */ +#include "secure_init.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Constants required to manipulate the SCB. + */ +#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */ +#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL ) +#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS ) +#define secureinitSCB_AIRCR_PRIS_POS ( 14UL ) +#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS ) + +/** + * @brief Constants required to manipulate the FPU. + */ +#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define secureinitFPCCR_LSPENS_POS ( 29UL ) +#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS ) +#define secureinitFPCCR_TS_POS ( 26UL ) +#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS ) + +#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */ +#define secureinitNSACR_CP10_POS ( 10UL ) +#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS ) +#define secureinitNSACR_CP11_POS ( 11UL ) +#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS ) +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) | + ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) | + ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK ); + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is + * permitted. CP11 should be programmed to the same value as CP10. */ + *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK ); + + /* LSPENS = 0 ==> LSPEN is writable fron non-secure state. This ensures + * that we can enable/disable lazy stacking in port.c file. */ + *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK ); + + /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP + * registers (S16-S31) are also pushed to stack on exception entry and + * restored on exception return. 
*/ + *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK ); + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/secure/secure_init.h b/portable/GCC/ARM_CM55/secure/secure_init.h new file mode 100644 index 00000000000..21daeda6b89 --- /dev/null +++ b/portable/GCC/ARM_CM55/secure/secure_init.h @@ -0,0 +1,54 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_INIT_H__ +#define __SECURE_INIT_H__ + +/** + * @brief De-prioritizes the non-secure exceptions. + * + * This is needed to ensure that the non-secure PendSV runs at the lowest + * priority. Context switch is done in the non-secure PendSV handler. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_DePrioritizeNSExceptions( void ); + +/** + * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access. + * + * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point + * Registers are not leaked to the non-secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_EnableNSFPUAccess( void ); + +#endif /* __SECURE_INIT_H__ */ diff --git a/portable/GCC/ARM_CM55/secure/secure_port_macros.h b/portable/GCC/ARM_CM55/secure/secure_port_macros.h new file mode 100644 index 00000000000..304913b8dbf --- /dev/null +++ b/portable/GCC/ARM_CM55/secure/secure_port_macros.h @@ -0,0 +1,140 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_PORT_MACROS_H__ +#define __SECURE_PORT_MACROS_H__ + +/** + * @brief Byte alignment requirements. + */ +#define secureportBYTE_ALIGNMENT 8 +#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 ) + +/** + * @brief Macro to declare a function as non-secure callable. + */ +#if defined( __IAR_SYSTEMS_ICC__ ) + #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root +#else + #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) ) +#endif + +/** + * @brief Set the secure PRIMASK value. + */ +#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Set the non-secure PRIMASK value. + */ +#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Read the PSP value in the given variable. + */ +#define secureportREAD_PSP( pucOutCurrentStackPointer ) \ + __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) ) + +/** + * @brief Set the PSP to the given value. + */ +#define secureportSET_PSP( pucCurrentStackPointer ) \ + __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) ) + +/** + * @brief Read the PSPLIM value in the given variable. + */ +#define secureportREAD_PSPLIM( pucOutStackLimit ) \ + __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) ) + +/** + * @brief Set the PSPLIM to the given value. + */ +#define secureportSET_PSPLIM( pucStackLimit ) \ + __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) ) + +/** + * @brief Set the NonSecure MSP to the given value. + */ +#define secureportSET_MSP_NS( pucMainStackPointer ) \ + __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) ) + +/** + * @brief Set the CONTROL register to the given value. + */ +#define secureportSET_CONTROL( ulControl ) \ + __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" ) + +/** + * @brief Read the Interrupt Program Status Register (IPSR) value in the given + * variable. + */ +#define secureportREAD_IPSR( ulIPSR ) \ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) ) + +/** + * @brief PRIMASK value to enable interrupts. + */ +#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0 + +/** + * @brief PRIMASK value to disable interrupts. + */ +#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1 + +/** + * @brief Disable secure interrupts. + */ +#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Disable non-secure interrupts. + * + * This effectively disables context switches. + */ +#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Enable non-secure interrupts. 
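+ *
+ * Together with secureportDISABLE_NON_SECURE_INTERRUPTS() this bounds the
+ * short critical sections on the secure side; for example, vPortFree() in
+ * secure_heap.c updates its free list essentially as:
+ *
+ *     secureportDISABLE_NON_SECURE_INTERRUPTS();
+ *     prvInsertBlockIntoFreeList( pxLink );
+ *     secureportENABLE_NON_SECURE_INTERRUPTS();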
+ */
+#define secureportENABLE_NON_SECURE_INTERRUPTS()    secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Assert definition.
+ */
+#define secureportASSERT( x )                          \
+    if( ( x ) == 0 )                                   \
+    {                                                  \
+        secureportDISABLE_SECURE_INTERRUPTS();         \
+        secureportDISABLE_NON_SECURE_INTERRUPTS();     \
+        for( ; ; ) {; }                                \
+    }
+
+#endif /* __SECURE_PORT_MACROS_H__ */
diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c
new file mode 100644
index 00000000000..d746923dfee
--- /dev/null
+++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c
@@ -0,0 +1,1203 @@
+/*
+ * FreeRTOS Kernel
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+    /* Secure components includes. */
+    #include "secure_context.h"
+    #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M55 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The TrustZone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ *    configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+    #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
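+
+    /* For illustration, case 3 above (run FreeRTOS on the non-secure side
+     * only) corresponds to the following FreeRTOSConfig.h fragment:
+     *
+     *     #define configRUN_FREERTOS_SECURE_ONLY    0
+     *     #define configENABLE_TRUSTZONE            0
+     */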
+#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + +/* The way the SysTick is clocked is not modified in case it is not the + * same a the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. 
*/ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. 
+ * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. 
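+ *
+ * For example, with illustrative values of configSYSTICK_CLOCK_HZ = 100 MHz
+ * and configTICK_RATE_HZ = 1000, ulTimerCountsForOneTick is 100000 and at
+ * most 0xFFFFFF / 100000 = 167 tick periods can be suppressed in a single
+ * tickless sleep.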
+ */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for is + * accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code will execute part way + * through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be un-suspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + * this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + * periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation + * contains its own wait for interrupt or wait for event + * instruction, and so wfi should not be executed again. However, + * the original expected idle time variable must remain unmodified, + * so a copy is taken. 
*/ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will + * increase any slippage between the time maintained by the RTOS and + * calendar time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. + * Again, the time the SysTick is stopped for is accounted for as + * best it can be, but using the tickless mode will inevitably + * result in some tiny drift of the time maintained by the kernel + * with respect to calendar time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + * been set back to the current reload value (the reload back being + * correct for the entire expected idle time) or if the SysTick is + * yet to count to zero (in which case an interrupt other than the + * SysTick must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + * reloaded with ulReloadValue. Reset the + * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + * period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is + * stepped forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + * Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. 
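+                 *
+                 * As a worked example with illustrative numbers: if
+                 * ulTimerCountsForOneTick is 100000, the expected idle time
+                 * was 10 ticks and 250000 decrements actually completed, then
+                 * ulCompleteTickPeriods is 2 and the reload register is set to
+                 * ( 3 * 100000 ) - 250000 = 50000 remaining counts.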
*/ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + * value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. 
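+         *
+         * As an illustrative sketch only (the actual memory layout is
+         * application and toolchain specific), a GNU linker script could
+         * export the flash symbols like this:
+         *     __privileged_functions_start__ = .;
+         *     *(privileged_functions)
+         *     __privileged_functions_end__ = .;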
*/ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. */ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. 
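+             * In ARMv8-M terms this sets the PRIVDEFENA ( bit 2 ) and ENABLE
+             * ( bit 0 ) bits of MPU_CTRL, i.e. the register is OR-ed with 0x5.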
*/ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. 
*/ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
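+         *
+         * As an illustrative example only, an application creating a task with
+         * xTaskCreateRestricted() might describe one extra region as
+         * (hypothetical address and size):
+         *     { ( void * ) 0x20020000, 512, tskMPU_REGION_READ_ONLY },
+         * and the loop over xRegions below translates each such entry into the
+         * RBAR/RLAR values stored in xMPUSettings.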
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c new file mode 100644 index 00000000000..0c2fac21992 --- /dev/null +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Portasm includes. */ +#include "portasm.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. 
*/ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r3, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #4 \n"/* r3 = 4. */ + " str r3, [r2] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #8 \n"/* r3 = 8. */ + " str r3, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #12 \n"/* r3 = 12. */ + " str r3, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " msr control, r2 \n"/* Set this task's CONTROL value. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n"/* Finally, branch to EXC_RETURN. */ + #else /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. 
*/ + #endif /* configENABLE_MPU */ + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst2: .word 0xe000ed94 \n" + "xMAIR0Const2: .word 0xe000edc0 \n" + "xRNRConst2: .word 0xe000ed98 \n" + "xRBARConst2: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " mrs r0, control \n"/* r0 = CONTROL. */ + " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + " ite ne \n" + " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + " bx lr \n"/* Return. */ + " \n" + " .align 4 \n" + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " mrs r0, control \n"/* Read the CONTROL register. */ + " bic r0, #1 \n"/* Clear the bit 0. */ + " msr control, r0 \n"/* Write back the new CONTROL value. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vResetPrivilege( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " mrs r0, control \n"/* r0 = CONTROL. */ + " orr r0, #1 \n"/* r0 = r0 | 1. */ + " msr control, r0 \n"/* CONTROL = r0. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ + " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ + " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */ + " cpsie i \n"/* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc %0 \n"/* System call to start the first task. */ + " nop \n" + " \n" + " .align 4 \n" + "xVTORConst: .word 0xe000ed08 \n" + ::"i" ( portSVC_START_SCHEDULER ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ + " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " msr basepri, r0 \n"/* basepri = ulMask. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::: "memory" + ); +} +/*-----------------------------------------------------------*/ + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, psp \n"/* Read PSP in r0. 
*/ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r2, control \n"/* r2 = CONTROL. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ + #else /* configENABLE_MPU */ + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ + #endif /* configENABLE_MPU */ + " \n" + " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " str r0, [r1] \n"/* Save the new top of stack in TCB. */ + " \n" + " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n"/* r0 = 0. */ + " msr basepri, r0 \n"/* Enable interrupts. */ + " \n" + " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ + " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r3, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #4 \n"/* r3 = 4. */ + " str r3, [r2] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #8 \n"/* r3 = 8. */ + " str r3, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #12 \n"/* r3 = 12. */ + " str r3, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ + #else /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + #if ( configENABLE_MPU == 1 ) + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ + #else /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ + #endif /* configENABLE_MPU */ + " msr psp, r0 \n"/* Remember the new top of stack for the task. */ + " bx r3 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst: .word 0xe000ed94 \n" + "xMAIR0Const: .word 0xe000edc0 \n" + "xRNRConst: .word 0xe000ed98 \n" + "xRBARConst: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} +/*-----------------------------------------------------------*/ + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " tst lr, #4 \n" + " ite eq \n" + " mrseq r0, msp \n" + " mrsne r0, psp \n" + " ldr r1, svchandler_address_const \n" + " bx r1 \n" + " \n" + " .align 4 \n" + "svchandler_address_const: .word vPortSVCHandler_C \n" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h new file mode 100644 index 00000000000..b654748e138 --- /dev/null +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -0,0 +1,71 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M55" +#define portDONT_DISCARD __attribute__( ( used ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
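For reference, a minimal, hypothetical FreeRTOSConfig.h fragment for this Cortex-M55 port: configENABLE_MVE is checked in portmacro.h above, and the portmacrocommon.h header it includes adds checks for configENABLE_FPU, configENABLE_MPU and configENABLE_TRUSTZONE (shown further below). The values here are illustrative only; the _NTZ variant would normally keep TrustZone disabled.

    /* Illustrative FreeRTOSConfig.h fragment - example values only. */
    #define configENABLE_MVE          1    /* Required by this port's portmacro.h. */
    #define configENABLE_FPU          1    /* Required by portmacrocommon.h. */
    #define configENABLE_MPU          0    /* Required by portmacrocommon.h. */
    #define configENABLE_TRUSTZONE    0    /* Required by portmacrocommon.h; 0 for the no-TrustZone (_NTZ) build. */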
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. 
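A small worked example of the tick arithmetic above, assuming the application sets configTICK_RATE_HZ to 1000 (an assumption, not something defined by this patch): portTICK_PERIOD_MS evaluates to 1000 / 1000 = 1 ms per tick, so a delay can be expressed directly in milliseconds.

    /* With configTICK_RATE_HZ == 1000, portTICK_PERIOD_MS == 1. */
    vTaskDelay( 500 / portTICK_PERIOD_MS );    /* Block for roughly 500 ms. */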
+ */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. 
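The region bookkeeping above is plain arithmetic on configTOTAL_MPU_REGIONS. With the default of 8 regions, portLAST_CONFIGURABLE_REGION is 7, portNUM_CONFIGURABLE_REGIONS is ( 7 - 5 ) + 1 = 3, and portTOTAL_NUM_REGIONS is 4 (three configurable regions plus the task stack region). The RBAR attribute macros compose with a bitwise OR; a hedged illustration follows (the particular combination is only an example):

    /* Attributes for a non-shareable, read-only, never-execute region. */
    uint32_t ulExampleRBARAttributes = ( portMPU_REGION_NON_SHAREABLE |
                                         portMPU_REGION_READ_ONLY |
                                         portMPU_REGION_EXECUTE_NEVER );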
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. 
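The portALLOCATE_SECURE_CONTEXT() macro above is the application-facing hook for TrustZone builds: a task must call it once before its first call into the secure world. A hedged usage sketch, with the task name, stack size and secure function invented purely for illustration:

    /* Hypothetical task; NSC_SecureOperation() stands in for a non-secure
     * callable (NSC) function exported by the secure image. */
    void vExampleSecureCallingTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* Allocate this task's secure-side stack before the first secure call.
         * 128 is an arbitrary example value for ulSecureStackSize. */
        portALLOCATE_SECURE_CONTEXT( 128 );

        for( ; ; )
        {
            NSC_SecureOperation();
        }
    }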
If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacro.h b/portable/IAR/ARM_CM23/non_secure/portmacro.h index fc70f3d47d4..0f7326c850b 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,284 +45,34 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M23" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __root -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. 
- */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M23" +#define portDONT_DISCARD __root /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. 
*/ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
- */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. */ - #pragma diag_suppress=Be006 - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. 
+ */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. 
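portEND_SWITCHING_ISR() / portYIELD_FROM_ISR() above simply pend PendSV when a switch is required; the usual caller follows the standard FreeRTOS deferred-yield pattern. A sketch with a hypothetical interrupt handler and semaphore handle:

    #include "FreeRTOS.h"
    #include "semphr.h"

    /* Hypothetical handle created elsewhere, e.g. with xSemaphoreCreateBinary(). */
    extern SemaphoreHandle_t xExampleSemaphore;

    void ExampleDevice_IRQHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Unblock the task waiting on the semaphore, noting whether it has a
         * higher priority than the task the interrupt preempted. */
        xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

        /* If so, pend PendSV so the context switch happens on exception return. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }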
If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h index 22e152e6de6..0f7326c850b 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,284 +45,34 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M23" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __root -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. 
- */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M23" +#define portDONT_DISCARD __root /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. 
*/ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
- */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. */ - #pragma diag_suppress=Be006 - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. 
+ */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. 
If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM33/non_secure/portasm.s b/portable/IAR/ARM_CM33/non_secure/portasm.s index f8fd04f9ff9..44cd8d0e19d 100644 --- a/portable/IAR/ARM_CM33/non_secure/portasm.s +++ b/portable/IAR/ARM_CM33/non_secure/portasm.s @@ -219,11 +219,11 @@ PendSV_Handler: save_ns_context: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ - #if ( configENABLE_FPU == 1 ) - tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ it eq - vstmdbeq r2!, {s16-s31} /* Store the FPU registers which are not saved automatically. */ - #endif /* configENABLE_FPU */ + vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ @@ -323,11 +323,11 @@ PendSV_Handler: restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ - #if ( configENABLE_FPU == 1 ) - tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ it eq - vldmiaeq r2!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */ - #endif /* configENABLE_FPU */ + vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacro.h b/portable/IAR/ARM_CM33/non_secure/portmacro.h index 17aa4ed9fb6..5e5b20c5d21 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. 
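[Editor's note - illustration of the privilege macros above; a minimal sketch only, not the kernel's actual MPU wrapper code. The function name and the "privileged-only resource" are hypothetical.]

    #if ( configENABLE_MPU == 1 )
    void vExamplePrivilegedAccess( void ) /* Hypothetical helper. */
    {
        BaseType_t xWasPrivileged = portIS_PRIVILEGED();

        if( xWasPrivileged == 0 )
        {
            /* Executes "svc portSVC_RAISE_PRIVILEGE"; the SVC handler grants
             * privilege only when the request originates from a system call. */
            portRAISE_PRIVILEGE();
        }

        /* ... access a privileged-only resource here ... */

        if( xWasPrivileged == 0 )
        {
            /* Return to unprivileged execution by setting CONTROL bit 0. */
            portRESET_PRIVILEGE();
        }
    }
    #endif /* configENABLE_MPU */

Checking portIS_PRIVILEGED() first avoids dropping privilege for a caller that was already running privileged.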
@@ -43,284 +45,34 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. - */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M33" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __root -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M33" +#define portDONT_DISCARD __root /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. 
*/ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. 
*/ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() ulSetInterruptMask() - #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. - */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. 
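[Editor's note - the interrupt-mask macros removed here reappear in portmacrocommon.h, while portDISABLE_INTERRUPTS()/portENABLE_INTERRUPTS() stay in portmacro.h; either way they resolve to ulSetInterruptMask()/vClearInterruptMask(). For context, application code normally reaches the FROM_ISR pair through the task.h wrappers, roughly as in this hypothetical handler (handler name and shared data are illustrative):]

    void vExampleTimerISR( void )
    {
        UBaseType_t uxSavedInterruptStatus;

        /* taskENTER_CRITICAL_FROM_ISR() expands to portSET_INTERRUPT_MASK_FROM_ISR(),
         * i.e. ulSetInterruptMask() in this port, and returns the previous mask. */
        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();

        /* ... access data shared with tasks ... */

        /* Restore the previous mask via vClearInterruptMask(). */
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
    }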
*/ - #pragma diag_suppress=Be006 - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. 
+ */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
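[Editor's note - for reference, with the default configTOTAL_MPU_REGIONS of 8 the region macros above resolve as follows; simple arithmetic shown only to make the sizing of xMPU_SETTINGS further down easier to follow:]

    /*
     * portLAST_CONFIGURABLE_REGION = 8 - 1         = 7
     * portNUM_CONFIGURABLE_REGIONS = ( 7 - 5 ) + 1 = 3
     * portTOTAL_NUM_REGIONS        = 3 + 1         = 4
     *
     * i.e. three per-task configurable regions plus the task stack region,
     * which is the array length used for xRegionsSettings[] in xMPU_SETTINGS.
     */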
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
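[Editor's note - the scheduler utilities above make portYIELD_FROM_ISR() pend PendSV only when the handler reports that a higher priority task was woken. A typical deferred-interrupt handler is sketched below; the handler name and task handle are hypothetical, not part of this patch.]

    extern TaskHandle_t xUARTTaskHandle; /* Hypothetical handle of the processing task. */

    void vExampleUART_IRQHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Unblock the task that processes the received data. */
        vTaskNotifyGiveFromISR( xUARTTaskHandle, &xHigherPriorityTaskWoken );

        /* If that task has a higher priority than the interrupted task, request a
         * context switch: portEND_SWITCHING_ISR() sets portNVIC_PENDSVSET_BIT in
         * portNVIC_INT_CTRL_REG so PendSV runs as soon as this ISR returns. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }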
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s b/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s index 52dbe456342..99240ca08c4 100644 --- a/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s +++ b/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s @@ -64,10 +64,10 @@ SecureContext_SaveContextAsm: cbz r1, save_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ mrs r1, psp /* r1 = PSP. */ -#if ( configENABLE_FPU == 1 ) - vstmdb r1!, {s0} /* Trigger the defferred stacking of FPU registers. */ - vldmia r1!, {s0} /* Nullify the effect of the pervious statement. */ -#endif /* configENABLE_FPU */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */ + vldmia r1!, {s0} /* Nullify the effect of the previous statement. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) mrs r2, control /* r2 = CONTROL. 
*/ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s index 64c0295a82a..9e9970cd40f 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s @@ -171,11 +171,11 @@ vClearInterruptMask: PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ -#if ( configENABLE_FPU == 1 ) - tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ it eq - vstmdbeq r0!, {s16-s31} /* Store the FPU registers which are not saved automatically. */ -#endif /* configENABLE_FPU */ + vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) mrs r1, psplim /* r1 = PSPLIM. */ mrs r2, control /* r2 = CONTROL. */ @@ -235,11 +235,11 @@ PendSV_Handler: ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ #endif /* configENABLE_MPU */ -#if ( configENABLE_FPU == 1 ) - tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ it eq - vldmiaeq r0!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */ -#endif /* configENABLE_FPU */ + vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ #if ( configENABLE_MPU == 1 ) msr psplim, r1 /* Restore the PSPLIM register value for the task. */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h index 17aa4ed9fb6..5e5b20c5d21 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,284 +45,34 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ - - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ - - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ - -/*-----------------------------------------------------------*/ - -/** - * @brief Type definitions. 
- */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configUSE_16_BIT_TICKS == 1 ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL - -/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do - * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #endif -/*-----------------------------------------------------------*/ - /** * Architecture specifics. */ - #define portARCH_NAME "Cortex-M33" - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 - #define portDONT_DISCARD __root -/*-----------------------------------------------------------*/ - -/** - * @brief Extern declarations. - */ - extern BaseType_t xPortIsInsideInterrupt( void ); - - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ - - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#define portARCH_NAME "Cortex-M33" +#define portDONT_DISCARD __root /*-----------------------------------------------------------*/ -/** - * @brief MPU specific constants. - */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ - -/* MPU settings that can be overriden in FreeRTOSConfig.h. */ -#ifndef configTOTAL_MPU_REGIONS - /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) -#endif - #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif - -/* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ - -/* Device memory attributes used in MPU_MAIR registers. - * - * 8-bit values encoded as follows: - * Bit[7:4] - 0000 - Device Memory - * Bit[3:2] - 00 --> Device-nGnRnE - * 01 --> Device-nGnRE - * 10 --> Device-nGRE - * 11 --> Device-GRE - * Bit[1:0] - 00, Reserved. - */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ - -/* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ - -/* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) -/*-----------------------------------------------------------*/ - -/** - * @brief Settings to define an MPU region. - */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; - -/** - * @brief MPU settings as stored in the TCB. - */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; -/*-----------------------------------------------------------*/ - -/** - * @brief SVC numbers. - */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 -/*-----------------------------------------------------------*/ - -/** - * @brief Scheduler utilities. - */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portDISABLE_INTERRUPTS() ulSetInterruptMask() - #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() -/*-----------------------------------------------------------*/ - -/** - * @brief Tickless idle/low power functionality. 
- */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif -/*-----------------------------------------------------------*/ - -/** - * @brief Task function macros as described on the FreeRTOS.org WEB site. - */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) -/*-----------------------------------------------------------*/ - - #if ( configENABLE_TRUSTZONE == 1 ) - -/** - * @brief Allocate a secure context for the task. - * - * Tasks are not created with a secure context. Any task that is going to call - * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a - * secure context before it calls any secure function. - * - * @param[in] ulSecureStackSize The size of the secure stack to be allocated. - */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) - -/** - * @brief Called when a task is deleted to delete the task's secure context, - * if it has one. - * - * @param[in] pxTCB The TCB of the task being deleted. - */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ -/*-----------------------------------------------------------*/ - - #if ( configENABLE_MPU == 1 ) - -/** - * @brief Checks whether or not the processor is privileged. - * - * @return 1 if the processor is already privileged, 0 otherwise. - */ - #define portIS_PRIVILEGED() xIsPrivileged() - -/** - * @brief Raise an SVC request to raise privilege. - * - * The SVC handler checks that the SVC was raised from a system call and only - * then it raises the privilege. If this is called from any other place, - * the privilege is not raised. - */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); - -/** - * @brief Lowers the privilege level by setting the bit 0 of the CONTROL - * register. - */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ -/*-----------------------------------------------------------*/ - -/** - * @brief Barriers. - */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. */ - #pragma diag_suppress=Be006 - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus + } +#endif #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. 
+ */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. 
*/ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. 
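[Editor's note - the TrustZone macros above imply a calling pattern on the non-secure side: a task allocates its secure context once, before its first secure call, and the kernel frees it again through portCLEAN_UP_TCB() when the task is deleted. A minimal sketch; the task, the secure-side function and the stack size are hypothetical.]

    extern void SecureCalc_DoWork( void ); /* Hypothetical secure-side (NSC) function. */

    void vExampleSecureClientTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* Give this task a secure-side context before any secure calls are made.
         * 128 is an illustrative secure stack size only. */
        portALLOCATE_SECURE_CONTEXT( 128 );

        for( ; ; )
        {
            SecureCalc_DoWork();
            vTaskDelay( pdMS_TO_TICKS( 1000 ) );
        }
    }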
+ * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c new file mode 100644 index 00000000000..d746923dfee --- /dev/null +++ b/portable/IAR/ARM_CM55/non_secure/port.c @@ -0,0 +1,1203 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. 
*/ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + +/* The way the SysTick is clocked is not modified in case it is not the + * same a the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. 
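[Editor's note - expressed as FreeRTOSConfig.h settings, the three supported combinations listed in the comment above look like this; option 2 (non-secure FreeRTOS with Secure Side call support) is shown uncommented. This simply restates the comment as config lines.]

    /* FreeRTOSConfig.h excerpt - pick exactly one combination: */

    /* 1. FreeRTOS on the Secure Side only. */
    /* #define configRUN_FREERTOS_SECURE_ONLY    1 */
    /* #define configENABLE_TRUSTZONE            0 */

    /* 2. FreeRTOS on the Non-Secure Side with Secure Side function call support. */
    #define configRUN_FREERTOS_SECURE_ONLY       0
    #define configENABLE_TRUSTZONE               1

    /* 3. FreeRTOS on the Non-Secure Side only, no Secure Side function calls. */
    /* #define configRUN_FREERTOS_SECURE_ONLY    0 */
    /* #define configENABLE_TRUSTZONE            0 */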
+ */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. 
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. 
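
For reference on the CONTROL register convention above, the privilege check that the port performs in assembly (xIsPrivileged() in portasm.s) can be expressed in C as the stand-alone sketch below; it reads CONTROL with the same inline assembly that vPortSVCHandler_C() uses and is illustrative only.

    static inline uint32_t ulIsPrivilegedSketch( void )
    {
        uint32_t ulControl;

        /* Read the CONTROL register. */
        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        /* Bit[0] clear means the processor is running privileged. */
        return ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0UL ) ? 1UL : 0UL;
    }
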
+ */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for is + * accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code will execute part way + * through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be un-suspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + * this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + * periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + * above. 
*/ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation + * contains its own wait for interrupt or wait for event + * instruction, and so wfi should not be executed again. However, + * the original expected idle time variable must remain unmodified, + * so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will + * increase any slippage between the time maintained by the RTOS and + * calendar time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. + * Again, the time the SysTick is stopped for is accounted for as + * best it can be, but using the tickless mode will inevitably + * result in some tiny drift of the time maintained by the kernel + * with respect to calendar time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + * been set back to the current reload value (the reload back being + * correct for the entire expected idle time) or if the SysTick is + * yet to count to zero (in which case an interrupt other than the + * SysTick must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + * reloaded with ulReloadValue. Reset the + * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + * period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is + * stepped forward by one less than the time spent waiting. 
*/ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + * Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + * value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. 
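
As a worked example of the figures above, assume (purely for illustration) configSYSTICK_CLOCK_HZ == configCPU_CLOCK_HZ == 25 MHz, configTICK_RATE_HZ == 1000 and configUSE_TICKLESS_IDLE == 1. vPortSetupTimerInterrupt() would then derive:

    /* Example figures only - they follow from the assumed clock rates above. */
    ulTimerCountsForOneTick         = 25000000UL / 1000UL;                /* 25000 SysTick counts per tick. */
    xMaximumPossibleSuppressedTicks = 0xffffffUL / 25000UL;               /* 671 whole ticks fit in the 24-bit counter. */
    ulStoppedTimerCompensation      = 45UL / ( 25000000UL / 25000000UL ); /* 45 counts of compensation. */
    portNVIC_SYSTICK_LOAD_REG       = ( 25000000UL / 1000UL ) - 1UL;      /* Reload value of 24999. */
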
*/ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. 
*/ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
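
For clarity on the CPACR arithmetic in prvSetupFPU() above: with portCPACR_CP10_VALUE and portCPACR_CP11_VALUE both set to 3, the value OR-ed into the register works out as follows (a worked example only, no additional configuration is implied).

    /* ( 3UL << 20UL ) | ( 3UL << 22UL ) == 0x00300000 | 0x00C00000 == 0x00F00000,
     * i.e. full access to CP10 and CP11 for both privileged and unprivileged code. */
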
*/ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
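
For reference on the ucSVCNumber extraction above: the stacked PC holds the address of the instruction following the SVC, and a Thumb 'svc #imm8' is a 16-bit encoding whose low byte is the immediate, so reading the byte at PC minus 2 recovers the SVC number. A hypothetical worked example, assuming a task executed 'svc #2' (portSVC_START_SCHEDULER) at the invented address 0x08001000:

    /* Stacked PC ( pulCallerStackAddress[ 6 ] ) = 0x08001002.                    */
    /* Flash at 0x08001000 holds the bytes 0x02 0xDF ( "svc #2", little endian ). */
    /* ( ( uint8_t * ) 0x08001002 )[ -2 ] therefore reads 0x02, which is          */
    /* portSVC_START_SCHEDULER and selects the scheduler-start case above.        */
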
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55/non_secure/portasm.h b/portable/IAR/ARM_CM55/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/IAR/ARM_CM55/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. 
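
Relating vPortStoreTaskMPUSettings() above back to application code: the xRegions array it translates is normally supplied through xTaskCreateRestricted(). The sketch below assumes the usual three configurable regions of an eight-region MPU; the task function, buffer and sizes are invented for the example, and in a real system the buffers must honour the 32-byte alignment implied by portMPU_RBAR_ADDRESS_MASK.

    static void prvExampleTask( void * pvParameters );    /* Hypothetical task function. */

    static StackType_t uxExampleTaskStack[ 256 ];          /* Must be 32-byte aligned. */
    static uint8_t ucSharedBuffer[ 128 ];                  /* Must be 32-byte aligned. */

    static const TaskParameters_t xExampleTaskParameters =
    {
        .pvTaskCode     = prvExampleTask,
        .pcName         = "Restricted",
        .usStackDepth   = 256,
        .pvParameters   = NULL,
        .uxPriority     = tskIDLE_PRIORITY + 1,
        .puxStackBuffer = uxExampleTaskStack,
        .xRegions       =
        {
            /* Base address,  length, parameters - translated by vPortStoreTaskMPUSettings(). */
            { ucSharedBuffer, 128,    tskMPU_REGION_READ_ONLY | tskMPU_REGION_EXECUTE_NEVER },
            { 0,              0,      0 },
            { 0,              0,      0 }
        }
    };

    /* Created with: xTaskCreateRestricted( &xExampleTaskParameters, NULL ); */
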
+ * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/IAR/ARM_CM55/non_secure/portasm.s b/portable/IAR/ARM_CM55/non_secure/portasm.s new file mode 100644 index 00000000000..33817d00253 --- /dev/null +++ b/portable/IAR/ARM_CM55/non_secure/portasm.s @@ -0,0 +1,353 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. 
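
As an example of the guard described above, a FreeRTOSConfig.h would keep assembler-visible settings outside the guard and wrap anything the IAR assembler cannot parse inside it. The SystemCoreClock usage below is a common pattern shown for illustration, not a requirement of this port.

    #define configMAX_SYSCALL_INTERRUPT_PRIORITY    ( 16 )   /* Illustrative value - needed by portasm.s. */

    #ifdef __ICCARM__                                        /* Seen by the IAR C compiler only. */
        #include <stdint.h>
        extern uint32_t SystemCoreClock;                     /* 'extern' would break the assembler. */
        #define configCPU_CLOCK_HZ    ( SystemCoreClock )
    #endif
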
*/ +#include "FreeRTOSConfig.h" + + EXTERN pxCurrentTCB + EXTERN xSecureContext + EXTERN vTaskSwitchContext + EXTERN vPortSVCHandler_C + EXTERN SecureContext_SaveContext + EXTERN SecureContext_LoadContext + + PUBLIC xIsPrivileged + PUBLIC vResetPrivilege + PUBLIC vPortAllocateSecureContext + PUBLIC vRestoreContextOfFirstTask + PUBLIC vRaisePrivilege + PUBLIC vStartFirstTask + PUBLIC ulSetInterruptMask + PUBLIC vClearInterruptMask + PUBLIC PendSV_Handler + PUBLIC SVC_Handler + PUBLIC vPortFreeSecureContext +/*-----------------------------------------------------------*/ + +/*---------------- Unprivileged Functions -------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION .text:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +xIsPrivileged: + mrs r0, control /* r0 = CONTROL. */ + tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + ite ne + movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vResetPrivilege: + mrs r0, control /* r0 = CONTROL. */ + orr r0, r0, #1 /* r0 = r0 | 1. */ + msr control, r0 /* CONTROL = r0. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vPortAllocateSecureContext: + svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +/*----------------- Privileged Functions --------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION privileged_functions:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +vRestoreContextOfFirstTask: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r3, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ + ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r4, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r4, #4 /* r4 = 4. */ + str r4, [r2] /* Program RNR = 4. */ + adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. 
*/ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ + ldr r5, =xSecureContext + str r1, [r5] /* Set xSecureContext to this task's value for the same. */ + msr psplim, r2 /* Set this task's PSPLIM value. */ + msr control, r3 /* Set this task's CONTROL value. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r4 /* Finally, branch to EXC_RETURN. */ +#else /* configENABLE_MPU */ + ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + ldr r4, =xSecureContext + str r1, [r4] /* Set xSecureContext to this task's value for the same. */ + msr psplim, r2 /* Set this task's PSPLIM value. */ + movs r1, #2 /* r1 = 2. */ + msr CONTROL, r1 /* Switch to use PSP in the thread mode. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r3 /* Finally, branch to EXC_RETURN. */ +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +vRaisePrivilege: + mrs r0, control /* Read the CONTROL register. */ + bic r0, r0, #1 /* Clear the bit 0. */ + msr control, r0 /* Write back the new CONTROL value. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vStartFirstTask: + ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */ + ldr r0, [r0] /* The first entry in vector table is stack pointer. */ + msr msp, r0 /* Set the MSP back to the start of the stack. */ + cpsie i /* Globally enable interrupts. */ + cpsie f + dsb + isb + svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */ +/*-----------------------------------------------------------*/ + +ulSetInterruptMask: + mrs r0, basepri /* r0 = basepri. Return original basepri value. */ + mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vClearInterruptMask: + msr basepri, r0 /* basepri = ulMask. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + mrs r2, psp /* Read PSP in r2. */ + + cbz r0, save_ns_context /* No secure context to save. */ + push {r0-r2, r14} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r3} /* LR is now in r3. */ + mov lr, r3 /* LR = r3. */ + lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ +#if ( configENABLE_MPU == 1 ) + subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r3, control /* r3 = CONTROL. */ + mov r4, lr /* r4 = LR/EXC_RETURN. */ + stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ +#else /* configENABLE_MPU */ + subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ +#endif /* configENABLE_MPU */ + b select_next_task + + save_ns_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + adds r2, r2, #16 /* r2 = r2 + 16. */ + stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r3, control /* r3 = CONTROL. */ + mov r4, lr /* r4 = LR/EXC_RETURN. */ + subs r2, r2, #16 /* r2 = r2 - 16. */ + stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + adds r2, r2, #12 /* r2 = r2 + 12. */ + stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + subs r2, r2, #12 /* r2 = r2 - 12. */ + stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ + + #if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r3] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r3] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r4, [r1] /* r4 = *r1 i.e. 
r4 = MAIR0. */ + ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ + str r4, [r3] /* Program MAIR0. */ + ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ + movs r4, #4 /* r4 = 4. */ + str r4, [r3] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r3] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r3] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + + #if ( configENABLE_MPU == 1 ) + ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + msr control, r3 /* Restore the CONTROL register value for the task. */ + mov lr, r4 /* LR = r4. */ + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r3] /* Restore the task's xSecureContext. */ + cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + push {r2, r4} + bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r2, r4} + mov lr, r4 /* LR = r4. */ + lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr + #else /* configENABLE_MPU */ + ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + mov lr, r4 /* LR = r4. */ + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r3] /* Restore the task's xSecureContext. */ + cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + push {r2, r4} + bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r2, r4} + mov lr, r4 /* LR = r4. */ + lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr + #endif /* configENABLE_MPU */ + + restore_ns_context: + ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. 
Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr +/*-----------------------------------------------------------*/ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C +/*-----------------------------------------------------------*/ + +vPortFreeSecureContext: + /* r0 = uint32_t *pulTCB. */ + ldr r2, [r0] /* The first item in the TCB is the top of the stack. */ + ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */ + cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */ + it ne + svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM55/non_secure/portmacro.h b/portable/IAR/ARM_CM55/non_secure/portmacro.h new file mode 100644 index 00000000000..a3b510e282c --- /dev/null +++ b/portable/IAR/ARM_CM55/non_secure/portmacro.h @@ -0,0 +1,83 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. 
+ */ +#define portARCH_NAME "Cortex-M55" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. 
+ #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
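Taken together with the configENABLE_MVE check in portmacro.h, the #error guards above define the minimum FreeRTOSConfig.h surface a project must provide for this port. The fragment below uses illustrative values only; configTOTAL_MPU_REGIONS may be omitted to take the default of 8, and 16 regions are rejected by portmacro.h.

/* Illustrative FreeRTOSConfig.h fragment for the ARM_CM55 port - example values only. */
#define configENABLE_FPU          1    /* Checked by portmacrocommon.h. */
#define configENABLE_MVE          1    /* Checked by portmacro.h. */
#define configENABLE_MPU          1    /* 1 = build with MPU support, 0 = without. */
#define configENABLE_TRUSTZONE    1    /* 1 = non-secure kernel calling a secure side. */
#define configTOTAL_MPU_REGIONS   8    /* Optional; defaults to 8, 16 is not yet supported. */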
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
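portYIELD_FROM_ISR() above is how an interrupt handler requests a context switch after waking a higher priority task; when its parameter is non-zero it pends PendSV by writing portNVIC_PENDSVSET_BIT to portNVIC_INT_CTRL_REG. A minimal sketch, assuming a hypothetical peripheral interrupt and a semaphore handle created elsewhere (FreeRTOS.h and semphr.h assumed included):

/* Hypothetical interrupt handler - shown only to illustrate portYIELD_FROM_ISR(). */
extern SemaphoreHandle_t xExampleSemaphore;

void ExampleIRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Wake the task blocked on xExampleSemaphore; the flag becomes pdTRUE if
     * that task has a higher priority than the one that was interrupted. */
    xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

    /* If a switch is needed this pends PendSV so the switch happens as soon
     * as the interrupt completes. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}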
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM55/secure/secure_context.c b/portable/IAR/ARM_CM55/secure/secure_context.c new file mode 100644 index 00000000000..0730d574dd0 --- /dev/null +++ b/portable/IAR/ARM_CM55/secure/secure_context.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
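The portALLOCATE_SECURE_CONTEXT() / portCLEAN_UP_TCB() pair above gives tasks their secure-side stacks. Below is a minimal sketch of the documented call pattern; the task, the 256-byte secure stack size and the NSCExampleFunction() entry point are all hypothetical.

/* Hypothetical non-secure task that calls into the secure image. */
extern void NSCExampleFunction( void );

void vSecureCallingTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* A task starts with no secure context, so one must be allocated before
     * the first call into secure code.  256 is an arbitrary secure stack size. */
    portALLOCATE_SECURE_CONTEXT( 256 );

    for( ;; )
    {
        /* Any non-secure callable function exported by the secure side. */
        NSCExampleFunction();
    }
}

When such a task is later deleted, portCLEAN_UP_TCB() above passes its TCB to vPortFreeSecureContext() so the secure stack is returned.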
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief CONTROL value for privileged tasks. + * + * Bit[0] - 0 --> Thread mode is privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_PRIVILEGED 0x02 + +/** + * @brief CONTROL value for un-privileged tasks. + * + * Bit[0] - 1 --> Thread mode is un-privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03 + +/** + * @brief Size of stack seal values in bytes. + */ +#define securecontextSTACK_SEAL_SIZE 8 + +/** + * @brief Stack seal value as recommended by ARM. + */ +#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5 + +/** + * @brief Maximum number of secure contexts. + */ +#ifndef secureconfigMAX_SECURE_CONTEXTS + #define secureconfigMAX_SECURE_CONTEXTS 8UL +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Pre-allocated array of secure contexts. + */ +SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ]; +/*-----------------------------------------------------------*/ + +/** + * @brief Get a free secure context for a task from the secure context pool (xSecureContexts). + * + * This function ensures that only one secure context is allocated for a task. + * + * @param[in] pvTaskHandle The task handle for which the secure context is allocated. + * + * @return Index of a free secure context in the xSecureContexts array. + */ +static uint32_t ulGetSecureContext( void * pvTaskHandle ); + +/** + * @brief Return the secure context to the secure context pool (xSecureContexts). + * + * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array. + */ +static void vReturnSecureContext( uint32_t ulSecureContextIndex ); + +/* These are implemented in assembly. */ +extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ); +extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ); +/*-----------------------------------------------------------*/ + +static uint32_t ulGetSecureContext( void * pvTaskHandle ) +{ + /* Start with invalid index. */ + uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) && + ( xSecureContexts[ i ].pucStackLimit == NULL ) && + ( xSecureContexts[ i ].pucStackStart == NULL ) && + ( xSecureContexts[ i ].pvTaskHandle == NULL ) && + ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = i; + } + else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle ) + { + /* A task can only have one secure context. Do not allocate a second + * context for the same task. 
*/ + ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + break; + } + } + + return ulSecureContextIndex; +} +/*-----------------------------------------------------------*/ + +static void vReturnSecureContext( uint32_t ulSecureContextIndex ) +{ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL; + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_Init( void ) +{ + uint32_t ulIPSR, i; + static uint32_t ulSecureContextsInitialized = 0; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) ) + { + /* Ensure to initialize secure contexts only once. */ + ulSecureContextsInitialized = 1; + + /* No stack for thread mode until a task's context is loaded. */ + secureportSET_PSPLIM( securecontextNO_STACK ); + secureportSET_PSP( securecontextNO_STACK ); + + /* Initialize all secure contexts. */ + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + xSecureContexts[ i ].pucCurrentStackPointer = NULL; + xSecureContexts[ i ].pucStackLimit = NULL; + xSecureContexts[ i ].pucStackStart = NULL; + xSecureContexts[ i ].pvTaskHandle = NULL; + } + + #if ( configENABLE_MPU == 1 ) + { + /* Configure thread mode to use PSP and to be unprivileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED ); + } + #else /* configENABLE_MPU */ + { + /* Configure thread mode to use PSP and to be privileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED ); + } + #endif /* configENABLE_MPU */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ) +#else /* configENABLE_MPU */ + secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ) +#endif /* configENABLE_MPU */ +{ + uint8_t * pucStackMemory = NULL; + uint8_t * pucStackLimit; + uint32_t ulIPSR, ulSecureContextIndex; + SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID; + + #if ( configENABLE_MPU == 1 ) + uint32_t * pulCurrentStackPointer = NULL; + #endif /* configENABLE_MPU */ + + /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit + * Register (PSPLIM) value. */ + secureportREAD_IPSR( ulIPSR ); + secureportREAD_PSPLIM( pucStackLimit ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. + * Also do nothing, if a secure context us already loaded. PSPLIM is set to + * securecontextNO_STACK when no secure context is loaded. */ + if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) ) + { + /* Ontain a free secure context. */ + ulSecureContextIndex = ulGetSecureContext( pvTaskHandle ); + + /* Were we able to get a free context? */ + if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS ) + { + /* Allocate the stack space. 
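            * The allocation made below is ulSecureStackSize bytes of stack
            * plus securecontextSTACK_SEAL_SIZE bytes for the two seal words,
            * laid out as [ pucStackLimit .. stack .. pucStackStart ][ seal ][ seal ],
            * with the stack growing down from pucStackStart towards pucStackLimit.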
*/ + pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE ); + + if( pucStackMemory != NULL ) + { + /* Since stack grows down, the starting point will be the last + * location. Note that this location is next to the last + * allocated byte for stack (excluding the space for seal values) + * because the hardware decrements the stack pointer before + * writing i.e. if stack pointer is 0x2, a push operation will + * decrement the stack pointer to 0x1 and then write at 0x1. */ + xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize; + + /* Seal the created secure process stack. */ + *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE; + *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE; + + /* The stack cannot go beyond this location. This value is + * programmed in the PSPLIM register on context switch.*/ + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory; + + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle; + + #if ( configENABLE_MPU == 1 ) + { + /* Store the correct CONTROL value for the task on the stack. + * This value is programmed in the CONTROL register on + * context switch. */ + pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart; + pulCurrentStackPointer--; + + if( ulIsTaskPrivileged ) + { + *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED; + } + else + { + *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED; + } + + /* Store the current stack pointer. This value is programmed in + * the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer; + } + #else /* configENABLE_MPU */ + { + /* Current SP is set to the starting of the stack. This + * value programmed in the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart; + } + #endif /* configENABLE_MPU */ + + /* Ensure to never return 0 as a valid context handle. */ + xSecureContextHandle = ulSecureContextIndex + 1UL; + } + } + } + + return xSecureContextHandle; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint32_t ulIPSR, ulSecureContextIndex; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* Only free if a valid context handle is passed. */ + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + /* Ensure that the secure context being deleted is associated with + * the task. */ + if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) + { + /* Free the stack space. */ + vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit ); + + /* Return the secure context back to the free secure contexts pool. 
*/ + vReturnSecureContext( ulSecureContextIndex ); + } + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that no secure context is loaded and the task is loading it's + * own context. */ + if( ( pucStackLimit == securecontextNO_STACK ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that task's context is loaded and the task is saving it's own + * context. */ + if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55/secure/secure_context.h b/portable/IAR/ARM_CM55/secure/secure_context.h new file mode 100644 index 00000000000..d0adbaf018f --- /dev/null +++ b/portable/IAR/ARM_CM55/secure/secure_context.h @@ -0,0 +1,135 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_CONTEXT_H__ +#define __SECURE_CONTEXT_H__ + +/* Standard includes. */ +#include + +/* FreeRTOS includes. */ +#include "FreeRTOSConfig.h" + +/** + * @brief PSP value when no secure context is loaded. 
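 *
 * SecureContext_SaveContext() programs PSP and PSPLIM back to this value when
 * it saves a context, and SecureContext_LoadContext() refuses to load a
 * context unless PSPLIM still holds it, so a stale or double load is rejected.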
+ */ +#define securecontextNO_STACK 0x0 + +/** + * @brief Invalid context ID. + */ +#define securecontextINVALID_CONTEXT_ID 0UL +/*-----------------------------------------------------------*/ + +/** + * @brief Structure to represent a secure context. + * + * @note Since stack grows down, pucStackStart is the highest address while + * pucStackLimit is the first address of the allocated memory. + */ +typedef struct SecureContext +{ + uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */ + uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */ + uint8_t * pucStackStart; /**< First location of the stack memory. */ + void * pvTaskHandle; /**< Task handle of the task this context is associated with. */ +} SecureContext_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Opaque handle for a secure context. + */ +typedef uint32_t SecureContextHandle_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Initializes the secure context management system. + * + * PSP is set to NULL and therefore a task must allocate and load a context + * before calling any secure side function in the thread mode. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureContext_Init( void ); + +/** + * @brief Allocates a context on the secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] ulSecureStackSize Size of the stack to allocate on secure side. + * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise. + * + * @return Opaque context handle if context is successfully allocated, NULL + * otherwise. + */ +#if ( configENABLE_MPU == 1 ) + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ); +#else /* configENABLE_MPU */ + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ); +#endif /* configENABLE_MPU */ + +/** + * @brief Frees the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the + * context to be freed. + */ +void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Loads the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be loaded. + */ +void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Saves the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be saved. 
+ */ +void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +#endif /* __SECURE_CONTEXT_H__ */ diff --git a/portable/IAR/ARM_CM55/secure/secure_context_port_asm.s b/portable/IAR/ARM_CM55/secure/secure_context_port_asm.s new file mode 100644 index 00000000000..400bd0107a3 --- /dev/null +++ b/portable/IAR/ARM_CM55/secure/secure_context_port_asm.s @@ -0,0 +1,86 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + SECTION .text:CODE:NOROOT(2) + THUMB + +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ +#include "FreeRTOSConfig.h" + + PUBLIC SecureContext_LoadContextAsm + PUBLIC SecureContext_SaveContextAsm +/*-----------------------------------------------------------*/ + +SecureContext_LoadContextAsm: + /* pxSecureContext value is in r0. */ + mrs r1, ipsr /* r1 = IPSR. */ + cbz r1, load_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ + ldmia r0!, {r1, r2} /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */ + +#if ( configENABLE_MPU == 1 ) + ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */ + msr control, r3 /* CONTROL = r3. */ +#endif /* configENABLE_MPU */ + + msr psplim, r2 /* PSPLIM = r2. */ + msr psp, r1 /* PSP = r1. */ + + load_ctx_therad_mode: + bx lr +/*-----------------------------------------------------------*/ + +SecureContext_SaveContextAsm: + /* pxSecureContext value is in r0. */ + mrs r1, ipsr /* r1 = IPSR. */ + cbz r1, save_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ + mrs r1, psp /* r1 = PSP. */ + +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */ + vldmia r1!, {s0} /* Nullify the effect of the previous statement. 
*/ +#endif /* configENABLE_FPU || configENABLE_MVE */ + +#if ( configENABLE_MPU == 1 ) + mrs r2, control /* r2 = CONTROL. */ + stmdb r1!, {r2} /* Store CONTROL value on the stack. */ +#endif /* configENABLE_MPU */ + + str r1, [r0] /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */ + movs r1, #0 /* r1 = securecontextNO_STACK. */ + msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */ + msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */ + + save_ctx_therad_mode: + bx lr +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM55/secure/secure_heap.c b/portable/IAR/ARM_CM55/secure/secure_heap.c new file mode 100644 index 00000000000..c633e2d0596 --- /dev/null +++ b/portable/IAR/ARM_CM55/secure/secure_heap.c @@ -0,0 +1,451 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure context heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Total heap size. + */ +#ifndef secureconfigTOTAL_HEAP_SIZE + #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) ) +#endif + +/* No test marker by default. */ +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +/* No tracing by default. */ +#ifndef traceMALLOC + #define traceMALLOC( pvReturn, xWantedSize ) +#endif + +/* No tracing by default. */ +#ifndef traceFREE + #define traceFREE( pv, xBlockSize ) +#endif + +/* Block sizes must not get too small. */ +#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define secureheapBITS_PER_BYTE ( ( size_t ) 8 ) +/*-----------------------------------------------------------*/ + +/* Allocate the memory for the heap. */ +#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) + +/* The application writer has already defined the array used for the RTOS +* heap - probably so it can be placed in a special segment or address. 
*/ + extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#else /* configAPPLICATION_ALLOCATED_HEAP */ + static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/** + * @brief The linked list structure. + * + * This is used to link free blocks in order of their memory address. + */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ +} BlockLink_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Called automatically to setup the required heap structures the first + * time pvPortMalloc() is called. + */ +static void prvHeapInit( void ); + +/** + * @brief Inserts a block of memory that is being freed into the correct + * position in the list of free memory blocks. + * + * The block being freed will be merged with the block in front it and/or the + * block behind it if the memory blocks are adjacent to each other. + * + * @param[in] pxBlockToInsert The block being freed. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ); +/*-----------------------------------------------------------*/ + +/** + * @brief The size of the structure placed at the beginning of each allocated + * memory block must by correctly byte aligned. + */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + +/** + * @brief Create a couple of list links to mark the start and end of the list. + */ +static BlockLink_t xStart, * pxEnd = NULL; + +/** + * @brief Keeps track of the number of free bytes remaining, but says nothing + * about fragmentation. + */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/** + * @brief Gets set to the top bit of an size_t type. + * + * When this bit in the xBlockSize member of an BlockLink_t structure is set + * then the block belongs to the application. When the bit is free the block is + * still part of the free heap space. + */ +static size_t xBlockAllocatedBit = 0; +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ + BlockLink_t * pxFirstFreeBlock; + uint8_t * pucAlignedHeap; + size_t uxAddress; + size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( secureportBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + * blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + * at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + * entire heap space, minus the space taken by pxEnd. 
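     * On return the free list is therefore xStart -> pxFirstFreeBlock -> pxEnd,
     * and both xFreeBytesRemaining and xMinimumEverFreeBytesRemaining equal the
     * size of that single block.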
*/ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) +{ + BlockLink_t * pxIterator; + uint8_t * puc; + + /* Iterate through the list until a block is found that has a higher address + * than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + * before and the block after, then it's pxNextFreeBlock pointer will have + * already been set, and should not be set here as that would make it point + * to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +void * pvPortMalloc( size_t xWantedSize ) +{ + BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + void * pvReturn = NULL; + + /* If this is the first call to malloc then the heap will require + * initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is set. + * The top bit of the block size member of the BlockLink_t structure is used + * to determine who owns the block - the application or the kernel, so it + * must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + * structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number of + * bytes. */ + if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. 
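                 * As a worked example, on a 32-bit build where xHeapStructSize
                 * works out to 8: a 13 byte request grows to 21 bytes once the
                 * header is added above, and the adjustment below rounds it up
                 * to 24 so the block stays secureportBYTE_ALIGNMENT (8) aligned.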
*/ + xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) ); + secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + * one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size was + * not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + * BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + * of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + * two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + * block following the number of bytes requested. The void + * cast is used to prevent byte alignment warnings from the + * compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the single + * block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned by + * the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + + #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */ + + secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void * pv ) +{ + uint8_t * puc = ( uint8_t * ) pv; + BlockLink_t * pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + * before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. 
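         * xBlockAllocatedBit was set in the size field by pvPortMalloc() and
         * pxNextFreeBlock was set to NULL when the block was handed out, so the
         * asserts below only pass for a block that is really owned by the
         * application.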
*/ + secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + secureportASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + * allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + secureportDISABLE_NON_SECURE_INTERRUPTS(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + secureportENABLE_NON_SECURE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55/secure/secure_heap.h b/portable/IAR/ARM_CM55/secure/secure_heap.h new file mode 100644 index 00000000000..c13590f86ad --- /dev/null +++ b/portable/IAR/ARM_CM55/secure/secure_heap.h @@ -0,0 +1,66 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_HEAP_H__ +#define __SECURE_HEAP_H__ + +/* Standard includes. */ +#include + +/** + * @brief Allocates memory from heap. + * + * @param[in] xWantedSize The size of the memory to be allocated. + * + * @return Pointer to the memory region if the allocation is successful, NULL + * otherwise. + */ +void * pvPortMalloc( size_t xWantedSize ); + +/** + * @brief Frees the previously allocated memory. + * + * @param[in] pv Pointer to the memory to be freed. + */ +void vPortFree( void * pv ); + +/** + * @brief Get the free heap size. + * + * @return Free heap size. + */ +size_t xPortGetFreeHeapSize( void ); + +/** + * @brief Get the minimum ever free heap size. + * + * @return Minimum ever free heap size. 
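The allocator above is what backs the secure context stacks. Purely as an illustration of the secure_heap.h surface (the function name and the 128-byte size are arbitrary):

/* Illustration of the secure heap API only. */
void vSecureHeapExample( void )
{
    size_t xBefore = xPortGetFreeHeapSize();
    void * pvBuffer = pvPortMalloc( 128 );

    if( pvBuffer != NULL )
    {
        /* xPortGetFreeHeapSize() now reports at least 128 + xHeapStructSize
         * fewer bytes than xBefore; xPortGetMinimumEverFreeHeapSize() tracks
         * the low-water mark over the life of the heap. */
        vPortFree( pvBuffer );
    }

    ( void ) xBefore;
}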
+ */ +size_t xPortGetMinimumEverFreeHeapSize( void ); + +#endif /* __SECURE_HEAP_H__ */ diff --git a/portable/IAR/ARM_CM55/secure/secure_init.c b/portable/IAR/ARM_CM55/secure/secure_init.c new file mode 100644 index 00000000000..dc19ebc7d5e --- /dev/null +++ b/portable/IAR/ARM_CM55/secure/secure_init.c @@ -0,0 +1,106 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure init includes. */ +#include "secure_init.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Constants required to manipulate the SCB. + */ +#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */ +#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL ) +#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS ) +#define secureinitSCB_AIRCR_PRIS_POS ( 14UL ) +#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS ) + +/** + * @brief Constants required to manipulate the FPU. + */ +#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define secureinitFPCCR_LSPENS_POS ( 29UL ) +#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS ) +#define secureinitFPCCR_TS_POS ( 26UL ) +#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS ) + +#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */ +#define secureinitNSACR_CP10_POS ( 10UL ) +#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS ) +#define secureinitNSACR_CP11_POS ( 11UL ) +#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS ) +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. 
*/ + if( ulIPSR != 0 ) + { + *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) | + ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) | + ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK ); + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is + * permitted. CP11 should be programmed to the same value as CP10. */ + *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK ); + + /* LSPENS = 0 ==> LSPEN is writable fron non-secure state. This ensures + * that we can enable/disable lazy stacking in port.c file. */ + *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK ); + + /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP + * registers (S16-S31) are also pushed to stack on exception entry and + * restored on exception return. */ + *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK ); + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55/secure/secure_init.h b/portable/IAR/ARM_CM55/secure/secure_init.h new file mode 100644 index 00000000000..21daeda6b89 --- /dev/null +++ b/portable/IAR/ARM_CM55/secure/secure_init.h @@ -0,0 +1,54 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_INIT_H__ +#define __SECURE_INIT_H__ + +/** + * @brief De-prioritizes the non-secure exceptions. + * + * This is needed to ensure that the non-secure PendSV runs at the lowest + * priority. Context switch is done in the non-secure PendSV handler. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_DePrioritizeNSExceptions( void ); + +/** + * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access. 
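+ *
+ * In this port the non-secure kernel calls this function itself - see
+ * prvSetupFPU() in the non-secure port.c - while the scheduler is being
+ * started, so application code does not normally need to call it directly.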
+ * + * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point + * Registers are not leaked to the non-secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_EnableNSFPUAccess( void ); + +#endif /* __SECURE_INIT_H__ */ diff --git a/portable/IAR/ARM_CM55/secure/secure_port_macros.h b/portable/IAR/ARM_CM55/secure/secure_port_macros.h new file mode 100644 index 00000000000..304913b8dbf --- /dev/null +++ b/portable/IAR/ARM_CM55/secure/secure_port_macros.h @@ -0,0 +1,140 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_PORT_MACROS_H__ +#define __SECURE_PORT_MACROS_H__ + +/** + * @brief Byte alignment requirements. + */ +#define secureportBYTE_ALIGNMENT 8 +#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 ) + +/** + * @brief Macro to declare a function as non-secure callable. + */ +#if defined( __IAR_SYSTEMS_ICC__ ) + #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root +#else + #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) ) +#endif + +/** + * @brief Set the secure PRIMASK value. + */ +#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Set the non-secure PRIMASK value. + */ +#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Read the PSP value in the given variable. + */ +#define secureportREAD_PSP( pucOutCurrentStackPointer ) \ + __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) ) + +/** + * @brief Set the PSP to the given value. + */ +#define secureportSET_PSP( pucCurrentStackPointer ) \ + __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) ) + +/** + * @brief Read the PSPLIM value in the given variable. + */ +#define secureportREAD_PSPLIM( pucOutStackLimit ) \ + __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) ) + +/** + * @brief Set the PSPLIM to the given value. 
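+ *
+ * Each of these macros wraps a single MRS/MSR instruction. A minimal
+ * illustration of the read/write pairing (the variable name is hypothetical):
+ *
+ *     uint32_t * pulStackLimit;
+ *
+ *     secureportREAD_PSPLIM( pulStackLimit );   // pulStackLimit = PSPLIM
+ *     secureportSET_PSPLIM( pulStackLimit );    // PSPLIM = pulStackLimit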
+ */ +#define secureportSET_PSPLIM( pucStackLimit ) \ + __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) ) + +/** + * @brief Set the NonSecure MSP to the given value. + */ +#define secureportSET_MSP_NS( pucMainStackPointer ) \ + __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) ) + +/** + * @brief Set the CONTROL register to the given value. + */ +#define secureportSET_CONTROL( ulControl ) \ + __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" ) + +/** + * @brief Read the Interrupt Program Status Register (IPSR) value in the given + * variable. + */ +#define secureportREAD_IPSR( ulIPSR ) \ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) ) + +/** + * @brief PRIMASK value to enable interrupts. + */ +#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0 + +/** + * @brief PRIMASK value to disable interrupts. + */ +#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1 + +/** + * @brief Disable secure interrupts. + */ +#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Disable non-secure interrupts. + * + * This effectively disables context switches. + */ +#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Enable non-secure interrupts. + */ +#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL ) + +/** + * @brief Assert definition. + */ +#define secureportASSERT( x ) \ + if( ( x ) == 0 ) \ + { \ + secureportDISABLE_SECURE_INTERRUPTS(); \ + secureportDISABLE_NON_SECURE_INTERRUPTS(); \ + for( ; ; ) {; } \ + } + +#endif /* __SECURE_PORT_MACROS_H__ */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c new file mode 100644 index 00000000000..d746923dfee --- /dev/null +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c @@ -0,0 +1,1203 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. 
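+ * This file is part of the kernel itself, which is why the macro is defined
+ * before the includes and undefined again afterwards: the port code below must
+ * call the real task functions (vTaskSwitchContext(), xTaskIncrementTick() and
+ * so on) rather than their MPU wrapper equivalents.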
 */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+    /* Secure components includes. */
+    #include "secure_context.h"
+    #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M55 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+ #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
+#ifndef configSYSTICK_CLOCK_HZ
+ #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ
+ /* Ensure the SysTick is clocked at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
+#else
+
+/* The way the SysTick is clocked is not modified in case it is not the
+ * same as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT ( 0 )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE ( 3UL )
+#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS ( 20UL )
+#define portCPACR_CP11_POS ( 22UL )
+
+#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. 
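+                          * The ASPEN and LSPEN bits defined below are set in
+                          * this register by prvSetupFPU(): ASPEN enables the
+                          * automatic saving of floating point context on
+                          * exception entry, and LSPEN enables lazy stacking so
+                          * the FP registers are only saved when the interrupted
+                          * code actually used the FPU.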
*/ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. 
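+ *
+ * pxPortInitialiseStack() further down in this file uses these values to
+ * fabricate the frame a context switch expects to find, so that the very first
+ * switch into a task looks like a return from an exception. From high to low
+ * addresses the simulated frame holds:
+ *
+ *     xPSR (portINITIAL_XPSR, i.e. only the Thumb bit set), PC, LR, R12, R3-R0,
+ *     R11-R4, EXC_RETURN,
+ *     CONTROL        (only when configENABLE_MPU == 1),
+ *     PSPLIM,
+ *     xSecureContext (only when configENABLE_TRUSTZONE == 1).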
+ */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. 
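+ *
+ * Illustrative use in code that may be reached from either a task or an ISR
+ * (a sketch only - xExampleSemaphore is a hypothetical handle):
+ *
+ *     if( xPortIsInsideInterrupt() == pdTRUE )
+ *     {
+ *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *         xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );
+ *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ *     }
+ *     else
+ *     {
+ *         xSemaphoreGive( xExampleSemaphore );
+ *     }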
+ */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for is + * accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code will execute part way + * through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be un-suspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + * this tick period. 
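+             * Loading the remaining count, rather than a full reload value,
+             * means the tick period that was already in progress is not
+             * stretched by the abandoned sleep attempt.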
*/ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + * periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation + * contains its own wait for interrupt or wait for event + * instruction, and so wfi should not be executed again. However, + * the original expected idle time variable must remain unmodified, + * so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will + * increase any slippage between the time maintained by the RTOS and + * calendar time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. + * Again, the time the SysTick is stopped for is accounted for as + * best it can be, but using the tickless mode will inevitably + * result in some tiny drift of the time maintained by the kernel + * with respect to calendar time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + * been set back to the current reload value (the reload back being + * correct for the entire expected idle time) or if the SysTick is + * yet to count to zero (in which case an interrupt other than the + * SysTick must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + * reloaded with ulReloadValue. Reset the + * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + * period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long. 
*/ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is + * stepped forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + * Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + * value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. 
ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. 
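+             *
+             * The programming pattern is the same for every kernel region set
+             * up in this function: RNR selects the region number, RBAR takes
+             * the 32-byte aligned base address OR-ed with the shareability,
+             * access permission and execute-never bits, and RLAR takes the
+             * region's last address OR-ed with the MAIR attribute index and
+             * the region enable bit.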
*/ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. */ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. 
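+             * Setting the PENDSVSET bit in the interrupt control register
+             * pends the PendSV exception; PendSV_Handler (in portasm.s) then
+             * performs the actual switch once no other exception is active.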
*/ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. 
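+                 *
+                 * ulPC was read from the exception stack frame, so it holds the
+                 * return address immediately after the SVC instruction. The
+                 * range check below only raises the privilege when that address
+                 * lies between the linker symbols __syscalls_flash_start__ and
+                 * __syscalls_flash_end__, i.e. when the SVC genuinely came from
+                 * one of the FreeRTOS system call wrappers and not from
+                 * arbitrary unprivileged application code.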
*/ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. */ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. 
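+     * The comparison below is against a value that ulCriticalNesting never
+     * legitimately holds, so configASSERT() is certain to fire if this
+     * function is ever reached.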
*/ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. */ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. 
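+                 * Only bits [31:5] of RBAR and RLAR carry the address - hence
+                 * the 32-byte granularity enforced by the address masks above -
+                 * while the low bits are reused for the shareability, access
+                 * permission, execute-never, attribute index and enable flags
+                 * OR-ed in below.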
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s new file mode 100644 index 00000000000..93b58da3543 --- /dev/null +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s @@ -0,0 +1,262 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */ +#include "FreeRTOSConfig.h" + + EXTERN pxCurrentTCB + EXTERN vTaskSwitchContext + EXTERN vPortSVCHandler_C + + PUBLIC xIsPrivileged + PUBLIC vResetPrivilege + PUBLIC vRestoreContextOfFirstTask + PUBLIC vRaisePrivilege + PUBLIC vStartFirstTask + PUBLIC ulSetInterruptMask + PUBLIC vClearInterruptMask + PUBLIC PendSV_Handler + PUBLIC SVC_Handler +/*-----------------------------------------------------------*/ + +/*---------------- Unprivileged Functions -------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION .text:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +xIsPrivileged: + mrs r0, control /* r0 = CONTROL. */ + tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + ite ne + movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vResetPrivilege: + mrs r0, control /* r0 = CONTROL. */ + orr r0, r0, #1 /* r0 = r0 | 1. */ + msr control, r0 /* CONTROL = r0. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +/*----------------- Privileged Functions --------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION privileged_functions:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +vRestoreContextOfFirstTask: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU.
*/ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r3, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r3, #4 /* r3 = 4. */ + str r3, [r2] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ + msr psplim, r1 /* Set this task's PSPLIM value. */ + msr control, r2 /* Set this task's CONTROL value. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r3 /* Finally, branch to EXC_RETURN. */ +#else /* configENABLE_MPU */ + ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + msr psplim, r1 /* Set this task's PSPLIM value. */ + movs r1, #2 /* r1 = 2. */ + msr CONTROL, r1 /* Switch to use PSP in the thread mode. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r2 /* Finally, branch to EXC_RETURN. */ +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +vRaisePrivilege: + mrs r0, control /* Read the CONTROL register. */ + bic r0, r0, #1 /* Clear the bit 0. */ + msr control, r0 /* Write back the new CONTROL value. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vStartFirstTask: + ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */ + ldr r0, [r0] /* The first entry in vector table is stack pointer. */ + msr msp, r0 /* Set the MSP back to the start of the stack. */ + cpsie i /* Globally enable interrupts. */ + cpsie f + dsb + isb + svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */ +/*-----------------------------------------------------------*/ + +ulSetInterruptMask: + mrs r0, basepri /* r0 = basepri. Return original basepri value. */ + mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vClearInterruptMask: + msr basepri, r0 /* basepri = ulMask. */ + dsb + isb + bx lr /* Return. 
*/ +/*-----------------------------------------------------------*/ + +PendSV_Handler: + mrs r0, psp /* Read PSP in r0. */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ +#if ( configENABLE_MPU == 1 ) + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r2, control /* r2 = CONTROL. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ +#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ +#endif /* configENABLE_MPU */ + + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + str r0, [r1] /* Save the new top of stack in TCB. */ + + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r3, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r3, #4 /* r3 = 4. */ + str r3, [r2] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ +#else /* configENABLE_MPU */ + ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ +#endif /* configENABLE_MPU */ + +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. 
*/ +#endif /* configENABLE_FPU || configENABLE_MVE */ + + #if ( configENABLE_MPU == 1 ) + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + msr control, r2 /* Restore the CONTROL register value for the task. */ +#else /* configENABLE_MPU */ + msr psplim, r2 /* Restore the PSPLIM register value for the task. */ +#endif /* configENABLE_MPU */ + msr psp, r0 /* Remember the new top of stack for the task. */ + bx r3 +/*-----------------------------------------------------------*/ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h new file mode 100644 index 00000000000..a3b510e282c --- /dev/null +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h @@ -0,0 +1,83 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M55" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. 
+ */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. 
+ */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. 
+ */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ From fd19dbabf481936f7e1d99c7ed28e6bc1422cb29 Mon Sep 17 00:00:00 2001 From: AndreiCherniaev Date: Thu, 2 Jun 2022 21:03:37 +0400 Subject: [PATCH 053/164] add extra check for compiler time (#499) minor change to add extra check for compiler time to prevent bad config Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- portable/ThirdParty/GCC/ATmega/port.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/portable/ThirdParty/GCC/ATmega/port.c b/portable/ThirdParty/GCC/ATmega/port.c index 82779498571..f998cdb28b8 100644 --- a/portable/ThirdParty/GCC/ATmega/port.c +++ b/portable/ThirdParty/GCC/ATmega/port.c @@ -61,6 +61,8 @@ #define portTIMSK TIMSK0 #define portTIFR TIFR0 +#else + #error "No Timer defined for scheduler" #endif /*-----------------------------------------------------------*/ From a4b73d1f7e27c78cf943f564a83a395c13d40bf2 Mon Sep 17 00:00:00 2001 From: Tanmoy Sen <33438891+tanmoysen@users.noreply.github.com> Date: Wed, 8 Jun 2022 10:09:39 -0700 Subject: [PATCH 054/164] Update feature_request.md (#500) * Update feature_request.md * Remove trailing spaces Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal --- .github/ISSUE_TEMPLATE/feature_request.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 143cedd7e18..645f6848019 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -16,5 +16,15 @@ A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. +**How many devices will this feature impact?** +Expected volume for your product. + +**What are your project timelines?** +Timeline for milestones such as design completion, testing and validation, and production. + **Additional context** Add any other context or screenshots about the feature request here. + + +If you have the same (or similar) feature request, please upvote this issue with thumbs up 👍 +and use the comments section to provide answers to the questions above. From 76911db7689c7427430b41b29272585db7e137c1 Mon Sep 17 00:00:00 2001 From: Ravishankar Bhagavandas Date: Mon, 20 Jun 2022 17:48:34 -0700 Subject: [PATCH 055/164] Add callback overrides for stream buffer and message buffers (#437) * Let each stream/message can use its own sbSEND_COMPLETED In FreeRTOS.h, set the default value of configUSE_SB_COMPLETED_CALLBACK to zero, and add additional space for the function pointer when the buffer created statically. 
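As an illustration of the new API described by this commit (this sketch is not part of the patch itself; it assumes configUSE_SB_COMPLETED_CALLBACK is set to 1 in FreeRTOSConfig.h, and the callback and function names below are hypothetical):

    #include "FreeRTOS.h"
    #include "stream_buffer.h"

    /* Application callback matching StreamBufferCallbackFunction_t.  It is
     * invoked instead of the default sbSEND_COMPLETED() behaviour, but only
     * for stream buffers created with it. */
    static void prvExampleSendCompleted( StreamBufferHandle_t xStreamBuffer,
                                         BaseType_t xIsInsideISR,
                                         BaseType_t * pxHigherPriorityTaskWoken )
    {
        /* Application specific handling when data has been written to this
         * particular stream buffer instance. */
        ( void ) xStreamBuffer;
        ( void ) xIsInsideISR;
        ( void ) pxHigherPriorityTaskWoken;
    }

    void vExampleCreateStreamBuffer( void )
    {
        /* 100 byte stream buffer with a 10 byte trigger level.  The send
         * completed callback above overrides sbSEND_COMPLETED() for this
         * instance only; passing NULL for the receive completed callback
         * keeps the default sbRECEIVE_COMPLETED() behaviour. */
        StreamBufferHandle_t xStreamBuffer = xStreamBufferCreateWithCallback( 100, 10, prvExampleSendCompleted, NULL );
        ( void ) xStreamBuffer;
    }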
In stream_buffer.c, modify the sbSEND_COMPLETED macro so that each stream buffer can use its own implementation, add a pointer to the stream buffer's structure, and modify the buffer creation and initialization code accordingly Co-authored-by: eddie9712 --- .github/lexicon.txt | 3 + History.txt | 11 +++ include/FreeRTOS.h | 9 ++ include/message_buffer.h | 36 ++++++- include/mpu_prototypes.h | 8 +- include/stream_buffer.h | 52 +++++++++- portable/Common/mpu_wrappers.c | 62 ++++++++++-- stream_buffer.c | 171 +++++++++++++++++++++++++++------ 8 files changed, 305 insertions(+), 47 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 85aa17a7fb9..751013504dc 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1693,10 +1693,12 @@ pxqueuesetcontainer pxramstack pxreadycoroutinelists pxreadytaskslists +pxreceivecompletedcallback pxregions pxresult pxrxedmessage pxsemaphorebuffer +pxsendcompletedcallback pxstack pxstackbase pxstackbuffer @@ -2796,6 +2798,7 @@ xinterruptcontroller xinterruptdescriptortable xisfeasable xisfeasible +xisinsideisr xismessagebuffer xisprivileged xitemvalue diff --git a/History.txt b/History.txt index ba456f73119..7f57de148fe 100644 --- a/History.txt +++ b/History.txt @@ -13,6 +13,17 @@ Documentation and download available at https://www.FreeRTOS.org/ are both typedefs of the same struct xLIST_ITEM. This addresses some issues observed when strict-aliasing and link time optimization are enabled. To maintain backwards compatibility, configUSE_MINI_LIST_ITEM defaults to 1. + + Add the ability to override send and receive completed callbacks for each + instance of a stream buffer or message buffer. The feature can be controlled + by setting the configuration option configUSE_SB_COMPLETED_CALLBACK in + FreeRTOSConfig.h. When the option is set to 1, APIs + xStreamBufferCreateWithCallback() or xStreamBufferCreateStaticWithCallback() + (and likewise APIs from message buffer) can be used to create a stream buffer + or message buffer instance with application provided callback overrides. When + the option is set to 0, then the default callbacks as defined by + sbSEND_COMPLETED() and sbRECEIVE_COMPLETED() macros are invoked. To maintain + backwards compatibility, configUSE_SB_COMPLETED_CALLBACK defaults to 0. The + functionality is currently not supported for MPU enabled ports. Changes between FreeRTOS V10.4.5 and FreeRTOS V10.4.6 released November 12 2021 diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 82fc189b986..bed390534ab 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -994,6 +994,12 @@ #define configUSE_POSIX_ERRNO 0 #endif +#ifndef configUSE_SB_COMPLETED_CALLBACK + +/* By default per-instance callbacks are not enabled for stream buffer or message buffer. */ + #define configUSE_SB_COMPLETED_CALLBACK 0 +#endif + #ifndef portTICK_TYPE_IS_ATOMIC #define portTICK_TYPE_IS_ATOMIC 0 #endif @@ -1480,6 +1486,9 @@ typedef struct xSTATIC_STREAM_BUFFER #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxDummy4; #endif + #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + void * pvDummy5[ 2 ]; + #endif } StaticStreamBuffer_t; /* Message buffers are built on stream buffers. */ diff --git a/include/message_buffer.h b/include/message_buffer.h index b1962fac7a1..32284c4fb23 100644 --- a/include/message_buffer.h +++ b/include/message_buffer.h @@ -107,6 +107,18 @@ typedef void * MessageBufferHandle_t; * 32-bit architecture, so on most 32-bit architectures a 10 byte message will * take up 14 bytes of message buffer space.
* + * @param pxSendCompletedCallback Callback invoked when a send operation to the + * message buffer is complete. If the parameter is NULL or xMessageBufferCreate() + * is called without the parameter, then it will use the default implementation + * provided by sbSEND_COMPLETED macro. To enable the callback, + * configUSE_SB_COMPLETED_CALLBACK must be set to 1 in FreeRTOSConfig.h. + * + * @param pxReceiveCompletedCallback Callback invoked when a receive operation from + * the message buffer is complete. If the parameter is NULL or xMessageBufferCreate() + * is called without the parameter, it will use the default implementation provided + * by sbRECEIVE_COMPLETED macro. To enable the callback, + * configUSE_SB_COMPLETED_CALLBACK must be set to 1 in FreeRTOSConfig.h. + * * @return If NULL is returned, then the message buffer cannot be created * because there is insufficient heap memory available for FreeRTOS to allocate * the message buffer data structures and storage area. A non-NULL value being @@ -143,7 +155,12 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferCreate( xBufferSizeBytes ) \ - ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE ) + ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE, NULL, NULL ) + +#if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + #define xMessageBufferCreateWithCallback( xBufferSizeBytes, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ + ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE, pxSendCompletedCallback, pxReceiveCompletedCallback ) +#endif /** * message_buffer.h @@ -172,6 +189,16 @@ typedef void * MessageBufferHandle_t; * StaticMessageBuffer_t, which will be used to hold the message buffer's data * structure. * + * @param pxSendCompletedCallback Callback invoked when a new message is sent to the message buffer. + * If the parameter is NULL or xMessageBufferCreate() is called without the parameter, then it will use the default + * implementation provided by sbSEND_COMPLETED macro. To enable the callback, + * configUSE_SB_COMPLETED_CALLBACK must be set to 1 in FreeRTOSConfig.h. + * + * @param pxReceiveCompletedCallback Callback invoked when a message is read from a + * message buffer. If the parameter is NULL or xMessageBufferCreate() is called without the parameter, it will + * use the default implementation provided by sbRECEIVE_COMPLETED macro. To enable the callback, + * configUSE_SB_COMPLETED_CALLBACK must be set to 1 in FreeRTOSConfig.h. + * * @return If the message buffer is created successfully then a handle to the * created message buffer is returned. If either pucMessageBufferStorageArea or * pxStaticmessageBuffer are NULL then NULL is returned. 
@@ -210,7 +237,12 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferCreateStatic( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer ) \ - ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer ) + ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer, NULL, NULL ) + +#if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + #define xMessageBufferCreateStaticWithCallback( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ + ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) +#endif /** * message_buffer.h diff --git a/include/mpu_prototypes.h b/include/mpu_prototypes.h index 2c7b0e39ffb..1893db844c1 100644 --- a/include/mpu_prototypes.h +++ b/include/mpu_prototypes.h @@ -248,12 +248,16 @@ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL; StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, - BaseType_t xIsMessageBuffer ) FREERTOS_SYSTEM_CALL; + BaseType_t xIsMessageBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL; StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, - StaticStreamBuffer_t * const pxStaticStreamBuffer ) FREERTOS_SYSTEM_CALL; + StaticStreamBuffer_t * const pxStaticStreamBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL; diff --git a/include/stream_buffer.h b/include/stream_buffer.h index a00fd7cce7f..a3a263d95c3 100644 --- a/include/stream_buffer.h +++ b/include/stream_buffer.h @@ -71,6 +71,12 @@ struct StreamBufferDef_t; typedef struct StreamBufferDef_t * StreamBufferHandle_t; +/** + * Type used as a stream buffer's optional callback. + */ +typedef void (* StreamBufferCallbackFunction_t)( StreamBufferHandle_t xStreamBuffer, + BaseType_t xIsInsideISR, + BaseType_t * const pxHigherPriorityTaskWoken ); /** * stream_buffer.h @@ -103,6 +109,16 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t; * trigger level of 1 being used. It is not valid to specify a trigger level * that is greater than the buffer size. * + * @param pxSendCompletedCallback Callback invoked when number of bytes at least equal to + * trigger level is sent to the stream buffer. If the parameter is NULL, it will use the default + * implementation provided by sbSEND_COMPLETED macro. To enable the callback, + * configUSE_SB_COMPLETED_CALLBACK must be set to 1 in FreeRTOSConfig.h. + * + * @param pxReceiveCompletedCallback Callback invoked when more than zero bytes are read from a + * stream buffer. If the parameter is NULL, it will use the default + * implementation provided by sbRECEIVE_COMPLETED macro. To enable the callback, + * configUSE_SB_COMPLETED_CALLBACK must be set to 1 in FreeRTOSConfig.h. 
+ * * @return If NULL is returned, then the stream buffer cannot be created * because there is insufficient heap memory available for FreeRTOS to allocate * the stream buffer data structures and storage area. A non-NULL value being @@ -137,7 +153,14 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t; * \defgroup xStreamBufferCreate xStreamBufferCreate * \ingroup StreamBufferManagement */ -#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE ) + +#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) \ + xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, NULL, NULL ) + +#if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + #define xStreamBufferCreateWithCallback( xBufferSizeBytes, xTriggerLevelBytes, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ + xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pxSendCompletedCallback, pxReceiveCompletedCallback ) +#endif /** * stream_buffer.h @@ -179,6 +202,16 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t; * StaticStreamBuffer_t, which will be used to hold the stream buffer's data * structure. * + * @param pxSendCompletedCallback Callback invoked when number of bytes at least equal to + * trigger level is sent to the stream buffer. If the parameter is NULL, it will use the default + * implementation provided by sbSEND_COMPLETED macro. To enable the callback, + * configUSE_SB_COMPLETED_CALLBACK must be set to 1 in FreeRTOSConfig.h. + * + * @param pxReceiveCompletedCallback Callback invoked when more than zero bytes are read from a + * stream buffer. If the parameter is NULL, it will use the default + * implementation provided by sbRECEIVE_COMPLETED macro. To enable the callback, + * configUSE_SB_COMPLETED_CALLBACK must be set to 1 in FreeRTOSConfig.h. + * * @return If the stream buffer is created successfully then a handle to the * created stream buffer is returned. If either pucStreamBufferStorageArea or * pxStaticstreamBuffer are NULL then NULL is returned. @@ -218,8 +251,14 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t; * \defgroup xStreamBufferCreateStatic xStreamBufferCreateStatic * \ingroup StreamBufferManagement */ + #define xStreamBufferCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer ) \ - xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer ) + xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer, NULL, NULL ) + +#if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + #define xStreamBufferCreateStaticWithCallback( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ + xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) +#endif /** * stream_buffer.h @@ -843,13 +882,18 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf /* Functions below here are not part of the public API. 
*/ StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, - BaseType_t xIsMessageBuffer ) PRIVILEGED_FUNCTION; + BaseType_t xIsMessageBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; + StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, - StaticStreamBuffer_t * const pxStaticStreamBuffer ) PRIVILEGED_FUNCTION; + StaticStreamBuffer_t * const pxStaticStreamBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; diff --git a/portable/Common/mpu_wrappers.c b/portable/Common/mpu_wrappers.c index c9f23f1d0f9..74990c3dab6 100644 --- a/portable/Common/mpu_wrappers.c +++ b/portable/Common/mpu_wrappers.c @@ -1429,14 +1429,36 @@ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, - BaseType_t xIsMessageBuffer ) /* FREERTOS_SYSTEM_CALL */ + BaseType_t xIsMessageBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* FREERTOS_SYSTEM_CALL */ { StreamBufferHandle_t xReturn; BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, xIsMessageBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + /** + * Streambuffer application level callback functionality is disabled for MPU + * enabled ports. + */ + configASSERT( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ); + + if( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ) + { + xPortRaisePrivilege( xRunningPrivileged ); + xReturn = xStreamBufferGenericCreate( xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + NULL, + NULL ); + vPortResetPrivilege( xRunningPrivileged ); + } + else + { + traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); + xReturn = NULL; + } return xReturn; } @@ -1448,14 +1470,38 @@ size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, - StaticStreamBuffer_t * const pxStaticStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ + StaticStreamBuffer_t * const pxStaticStreamBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* FREERTOS_SYSTEM_CALL */ { StreamBufferHandle_t xReturn; BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, xIsMessageBuffer, pucStreamBufferStorageArea, pxStaticStreamBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + /** + * Streambuffer application level callback functionality is disabled for MPU + * enabled ports. 
+ */ + configASSERT( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ); + + if( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ) + { + xPortRaisePrivilege( xRunningPrivileged ); + xReturn = xStreamBufferGenericCreateStatic( xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + pucStreamBufferStorageArea, + pxStaticStreamBuffer, + NULL, + NULL ); + vPortResetPrivilege( xRunningPrivileged ); + } + else + { + traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ); + xReturn = NULL; + } return xReturn; } diff --git a/stream_buffer.c b/stream_buffer.c index fe6e77df9e1..b9d598e170d 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -69,6 +69,25 @@ ( void ) xTaskResumeAll(); #endif /* sbRECEIVE_COMPLETED */ +/* If user has provided a per-instance receive complete callback, then + * invoke the callback else use the receive complete macro which is provided by default for all instances. + */ +#if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + #define prvRECEIVE_COMPLETED( pxStreamBuffer ) \ + { \ + if( pxStreamBuffer->pxReceiveCompletedCallback != NULL ) \ + { \ + pxStreamBuffer->pxReceiveCompletedCallback( pxStreamBuffer, pdFALSE, NULL ); \ + } \ + else \ + { \ + sbRECEIVE_COMPLETED( pxStreamBuffer ); \ + } \ + } +#else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ + #define prvRECEIVE_COMPLETED( pxStreamBuffer ) sbRECEIVE_COMPLETED( pxStreamBuffer ) +#endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ + #ifndef sbRECEIVE_COMPLETED_FROM_ISR #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ @@ -90,9 +109,28 @@ } #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ +#if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ + pxHigherPriorityTaskWoken ) \ + { \ + if( pxStreamBuffer->pxReceiveCompletedCallback != NULL ) \ + { \ + pxStreamBuffer->pxReceiveCompletedCallback( pxStreamBuffer, pdTRUE, pxHigherPriorityTaskWoken ); \ + } \ + else \ + { \ + sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); \ + } \ + } +#else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ + #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ + sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) +#endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ + /* If the user has not provided an application specific Tx notification macro, - * or #defined the notification macro away, them provide a default implementation - * that uses task notifications. */ + * or #defined the notification macro away, then provide a default + * implementation that uses task notifications. + */ #ifndef sbSEND_COMPLETED #define sbSEND_COMPLETED( pxStreamBuffer ) \ vTaskSuspendAll(); \ @@ -108,6 +146,26 @@ ( void ) xTaskResumeAll(); #endif /* sbSEND_COMPLETED */ +/* If user has provided a per-instance send completed callback, then + * invoke the callback else use the send complete macro which is provided by default for all instances. 
+ */ +#if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + #define prvSEND_COMPLETED( pxStreamBuffer ) \ + { \ + if( pxStreamBuffer->pxSendCompletedCallback != NULL ) \ + { \ + pxStreamBuffer->pxSendCompletedCallback( pxStreamBuffer, pdFALSE, NULL ); \ + } \ + else \ + { \ + sbSEND_COMPLETED( pxStreamBuffer ); \ + } \ + } +#else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ + #define prvSEND_COMPLETED( pxStreamBuffer ) sbSEND_COMPLETED( pxStreamBuffer ) +#endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ + + #ifndef sbSEND_COMPLETE_FROM_ISR #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ { \ @@ -127,6 +185,25 @@ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ } #endif /* sbSEND_COMPLETE_FROM_ISR */ + + +#if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ + { \ + if( pxStreamBuffer->pxSendCompletedCallback != NULL ) \ + { \ + pxStreamBuffer->pxSendCompletedCallback( pxStreamBuffer, pdTRUE, pxHigherPriorityTaskWoken ); \ + } \ + else \ + { \ + sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); \ + } \ + } +#else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ + #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ + sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) +#endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ + /*lint -restore (9026) */ /* The number of bytes used to hold the length of a message in the buffer. */ @@ -153,6 +230,11 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */ #endif + + #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + StreamBufferCallbackFunction_t pxSendCompletedCallback; /* Optional callback called on send complete. sbSEND_COMPLETED is called if this is NULL. */ + StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */ + #endif } StreamBuffer_t; /* @@ -226,15 +308,17 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, uint8_t * const pucBuffer, size_t xBufferSizeBytes, size_t xTriggerLevelBytes, - uint8_t ucFlags ) PRIVILEGED_FUNCTION; + uint8_t ucFlags, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ - #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) - StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, - BaseType_t xIsMessageBuffer ) + BaseType_t xIsMessageBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) { uint8_t * pucAllocatedMemory; uint8_t ucFlags; @@ -289,7 +373,9 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. 
*/ xBufferSizeBytes, xTriggerLevelBytes, - ucFlags ); + ucFlags, + pxSendCompletedCallback, + pxReceiveCompletedCallback ); traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); } @@ -300,7 +386,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ } - #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ /*-----------------------------------------------------------*/ @@ -310,7 +395,9 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, - StaticStreamBuffer_t * const pxStaticStreamBuffer ) + StaticStreamBuffer_t * const pxStaticStreamBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) { StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) pxStaticStreamBuffer; /*lint !e740 !e9087 Safe cast as StaticStreamBuffer_t is opaque Streambuffer_t. */ StreamBufferHandle_t xReturn; @@ -360,7 +447,9 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, pucStreamBufferStorageArea, xBufferSizeBytes, xTriggerLevelBytes, - ucFlags ); + ucFlags, + pxSendCompletedCallback, + pxReceiveCompletedCallback ); /* Remember this was statically allocated in case it is ever deleted * again. */ @@ -378,7 +467,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, return xReturn; } - #endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ /*-----------------------------------------------------------*/ @@ -419,6 +507,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn = pdFAIL; + StreamBufferCallbackFunction_t pxSendCallback = NULL, pxReceiveCallback = NULL; #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxStreamBufferNumber; @@ -437,25 +526,32 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) /* Can only reset a message buffer if there are no tasks blocked on it. 
*/ taskENTER_CRITICAL(); { - if( pxStreamBuffer->xTaskWaitingToReceive == NULL ) + if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) ) { - if( pxStreamBuffer->xTaskWaitingToSend == NULL ) + #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) { - prvInitialiseNewStreamBuffer( pxStreamBuffer, - pxStreamBuffer->pucBuffer, - pxStreamBuffer->xLength, - pxStreamBuffer->xTriggerLevelBytes, - pxStreamBuffer->ucFlags ); - xReturn = pdPASS; - - #if ( configUSE_TRACE_FACILITY == 1 ) - { - pxStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; - } - #endif + pxSendCallback = pxStreamBuffer->pxSendCompletedCallback; + pxReceiveCallback = pxStreamBuffer->pxReceiveCompletedCallback; + } + #endif - traceSTREAM_BUFFER_RESET( xStreamBuffer ); + prvInitialiseNewStreamBuffer( pxStreamBuffer, + pxStreamBuffer->pucBuffer, + pxStreamBuffer->xLength, + pxStreamBuffer->xTriggerLevelBytes, + pxStreamBuffer->ucFlags, + pxSendCallback, + pxReceiveCallback ); + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + pxStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; } + #endif + + traceSTREAM_BUFFER_RESET( xStreamBuffer ); + + xReturn = pdPASS; } } taskEXIT_CRITICAL(); @@ -653,7 +749,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, /* Was a task waiting for the data? */ if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) { - sbSEND_COMPLETED( pxStreamBuffer ); + prvSEND_COMPLETED( pxStreamBuffer ); } else { @@ -703,7 +799,7 @@ size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, /* Was a task waiting for the data? */ if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) { - sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); } else { @@ -858,7 +954,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, if( xReceivedLength != ( size_t ) 0 ) { traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ); - sbRECEIVE_COMPLETED( pxStreamBuffer ); + prvRECEIVE_COMPLETED( xStreamBuffer ); } else { @@ -954,7 +1050,7 @@ size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, /* Was a task waiting for space in the buffer? 
*/ if( xReceivedLength != ( size_t ) 0 ) { - sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); } else { @@ -1260,7 +1356,9 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, uint8_t * const pucBuffer, size_t xBufferSizeBytes, size_t xTriggerLevelBytes, - uint8_t ucFlags ) + uint8_t ucFlags, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) { /* Assert here is deliberately writing to the entire buffer to ensure it can * be written to without generating exceptions, and is setting the buffer to a @@ -1280,6 +1378,17 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, pxStreamBuffer->xLength = xBufferSizeBytes; pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes; pxStreamBuffer->ucFlags = ucFlags; + #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) + { + pxStreamBuffer->pxSendCompletedCallback = pxSendCompletedCallback; + pxStreamBuffer->pxReceiveCompletedCallback = pxReceiveCompletedCallback; + } + #else + { + ( void ) pxSendCompletedCallback; + ( void ) pxReceiveCompletedCallback; + } + #endif } #if ( configUSE_TRACE_FACILITY == 1 ) From 8d0804d989efffb3ce5d77848ca90ccbb8ccb5f7 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Tue, 21 Jun 2022 03:34:52 -0700 Subject: [PATCH 056/164] Add configUSE_MUTEXES to function declarations in header (#504) This commit adds the configUSE_MUTEXES guard to the function declarations in semphr.h which are only available when configUSE_MUTEXES is set to 1. It was reported here - https://forums.freertos.org/t/mutex-missing-reference-to-configuse-mutexes-on-the-online-documentation/15231 Signed-off-by: Gaurav Aggarwal --- include/semphr.h | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/include/semphr.h b/include/semphr.h index 0784ac8f4b8..5226bcb5957 100644 --- a/include/semphr.h +++ b/include/semphr.h @@ -731,7 +731,7 @@ typedef QueueHandle_t SemaphoreHandle_t; * \defgroup xSemaphoreCreateMutex xSemaphoreCreateMutex * \ingroup Semaphores */ -#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) +#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_MUTEXES == 1 ) ) #define xSemaphoreCreateMutex() xQueueCreateMutex( queueQUEUE_TYPE_MUTEX ) #endif @@ -794,9 +794,9 @@ typedef QueueHandle_t SemaphoreHandle_t; * \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic * \ingroup Semaphores */ -#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) +#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_MUTEXES == 1 ) ) #define xSemaphoreCreateMutexStatic( pxMutexBuffer ) xQueueCreateMutexStatic( queueQUEUE_TYPE_MUTEX, ( pxMutexBuffer ) ) -#endif /* configSUPPORT_STATIC_ALLOCATION */ +#endif /** @@ -1126,7 +1126,7 @@ typedef QueueHandle_t SemaphoreHandle_t; * \defgroup vSemaphoreDelete vSemaphoreDelete * \ingroup Semaphores */ -#define vSemaphoreDelete( xSemaphore ) vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) ) +#define vSemaphoreDelete( xSemaphore ) vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) ) /** * semphr.h @@ -1143,7 +1143,9 @@ typedef QueueHandle_t SemaphoreHandle_t; * the holder may change between the function exiting and the returned value * being tested. 
*/ -#define xSemaphoreGetMutexHolder( xSemaphore ) xQueueGetMutexHolder( ( xSemaphore ) ) +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + #define xSemaphoreGetMutexHolder( xSemaphore ) xQueueGetMutexHolder( ( xSemaphore ) ) +#endif /** * semphr.h @@ -1156,7 +1158,9 @@ typedef QueueHandle_t SemaphoreHandle_t; * by a task), return NULL. * */ -#define xSemaphoreGetMutexHolderFromISR( xSemaphore ) xQueueGetMutexHolderFromISR( ( xSemaphore ) ) +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + #define xSemaphoreGetMutexHolderFromISR( xSemaphore ) xQueueGetMutexHolderFromISR( ( xSemaphore ) ) +#endif /** * semphr.h @@ -1170,7 +1174,7 @@ typedef QueueHandle_t SemaphoreHandle_t; * semaphore is not available. * */ -#define uxSemaphoreGetCount( xSemaphore ) uxQueueMessagesWaiting( ( QueueHandle_t ) ( xSemaphore ) ) +#define uxSemaphoreGetCount( xSemaphore ) uxQueueMessagesWaiting( ( QueueHandle_t ) ( xSemaphore ) ) /** * semphr.h @@ -1184,6 +1188,6 @@ typedef QueueHandle_t SemaphoreHandle_t; * semaphore is not available. * */ -#define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) ) +#define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) ) #endif /* SEMAPHORE_H */ From 0394132383b8212f4df68ca65500409d08f65485 Mon Sep 17 00:00:00 2001 From: Graham Sanderson Date: Wed, 22 Jun 2022 12:27:26 -0500 Subject: [PATCH 057/164] RP2040: Remove incorrect assertion (#508) After the xEventGroupWaitBits in vProtLockInternalSpinUnlockWithWait there was an assertion about pxYiledSpinLock being NULL, however when xEventGroupWaitBits returns, IRQs have been re-enabled and so it is no longer safe to assert on the state which is protected by IRQs being disabled. Co-authored-by: graham sanderson --- portable/ThirdParty/GCC/RP2040/port.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index e03f3dc30da..dbd54f243bc 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -1000,12 +1000,6 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) ulYieldSpinLockSaveValue = ulSave; xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit(pxLock->spin_lock), pdTRUE, pdFALSE, portMAX_DELAY); - - #if ( configNUM_CORES == 1 ) - /* sanity check that interrupts were disabled, then re-enabled during the call, which will have - * taken care of the yield. This should be checked with interrupt were disabled in SMP. */ - configASSERT( pxYieldSpinLocks[ xCoreID ] == NULL ); - #endif } } From d93359173e64df922cc7b5bdc528d4b8f55fa53b Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Thu, 23 Jun 2022 10:17:17 -0700 Subject: [PATCH 058/164] Ensure that xTaskGetCurrentTaskHandle is included (#507) This commits adds a check that INCLUDE_xTaskGetCurrentTaskHandle is set to 1. A compile time error message is produced if it is not set to 1. This is needed because stream_buffer.c uses xTaskGetCurrentTaskHandle. 
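(Editor's illustration, not part of this patch: with the new check in place, any FreeRTOSConfig.h used in a project that builds stream_buffer.c is expected to contain the setting below; the macro name is the one introduced by the patch.)

/* Required so that stream_buffer.c can call xTaskGetCurrentTaskHandle(). */
#define INCLUDE_xTaskGetCurrentTaskHandle    1
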
This was reported here - https://forums.freertos.org/t/xstreambufferreceive-include-xtaskgetcur/15283 Signed-off-by: Gaurav Aggarwal --- stream_buffer.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stream_buffer.c b/stream_buffer.c index b9d598e170d..e8a8839834f 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -44,6 +44,10 @@ #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c #endif +#if ( INCLUDE_xTaskGetCurrentTaskHandle != 1 ) + #error INCLUDE_xTaskGetCurrentTaskHandle must be set to 1 to build stream_buffer.c +#endif + /* Lint e961, e9021 and e750 are suppressed as a MISRA exception justified * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined * for the header files above, but not in this file, in order to generate the From 2b8c0176e4e501291b29eeaeb4f83d95e986a7fb Mon Sep 17 00:00:00 2001 From: Graham Sanderson Date: Fri, 24 Jun 2022 06:52:49 -0500 Subject: [PATCH 059/164] RP2040: Allow FreeRTOS to be added to the parent CMake project post initialization of the Pico SDK (#497) Co-authored-by: graham sanderson --- portable/ThirdParty/GCC/RP2040/CMakeLists.txt | 37 ++++++++++++------- portable/ThirdParty/GCC/RP2040/README.md | 20 ++++++++-- portable/ThirdParty/GCC/RP2040/library.cmake | 5 +++ 3 files changed, 45 insertions(+), 17 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/CMakeLists.txt b/portable/ThirdParty/GCC/RP2040/CMakeLists.txt index 2471cf4b6a2..c953f7573b0 100644 --- a/portable/ThirdParty/GCC/RP2040/CMakeLists.txt +++ b/portable/ThirdParty/GCC/RP2040/CMakeLists.txt @@ -21,20 +21,31 @@ if (NOT TARGET _FreeRTOS_kernel_inclusion_marker) pico_is_top_level_project(FREERTOS_KERNEL_TOP_LEVEL_PROJECT) - # The real work gets done in library.cmake which is called at the end of pico_sdk_init - list(APPEND PICO_SDK_POST_LIST_FILES ${CMAKE_CURRENT_LIST_DIR}/library.cmake) - - # We need to inject the following header file into ALL SDK files (which we do via the config header) - list(APPEND PICO_CONFIG_HEADER_FILES ${CMAKE_CURRENT_LIST_DIR}/include/freertos_sdk_config.h) - - if (FREERTOS_KERNEL_TOP_LEVEL_PROJECT) - message("FreeRTOS: initialize SDK since we're the top-level") - # Initialize the SDK - pico_sdk_init() + # if the SDK has already been initialized, then just add our libraries now - this allows + # this FreeRTOS port to just be added as a sub-directory or include within another project, rather than + # having to include it at the top level before pico_sdk_init() + if (TARGET _pico_sdk_inclusion_marker) + if (PICO_SDK_VERSION_STRING VERSION_LESS "1.3.2") + message(FATAL_ERROR "Require at least Raspberry Pi Pico SDK version 1.3.2 to include FreeRTOS after pico_sdk_init()") + endif() + include(${CMAKE_CURRENT_LIST_DIR}/library.cmake) else() - set(PICO_SDK_POST_LIST_FILES ${PICO_SDK_POST_LIST_FILES} PARENT_SCOPE) - set(PICO_CONFIG_HEADER_FILES ${PICO_CONFIG_HEADER_FILES} PARENT_SCOPE) - set(FREERTOS_KERNEL_PATH ${FREERTOS_KERNEL_PATH} PARENT_SCOPE) + # The real work gets done in library.cmake which is called at the end of pico_sdk_init + list(APPEND PICO_SDK_POST_LIST_FILES ${CMAKE_CURRENT_LIST_DIR}/library.cmake) + if (PICO_SDK_VERSION_STRING VERSION_LESS "1.3.2") + # We need to inject the following header file into ALL SDK files (which we do via the config header) + list(APPEND PICO_CONFIG_HEADER_FILES ${CMAKE_CURRENT_LIST_DIR}/include/freertos_sdk_config.h) + endif() + + if (FREERTOS_KERNEL_TOP_LEVEL_PROJECT) + message("FreeRTOS: initialize SDK since we're the top-level") + # Initialize the SDK + 
pico_sdk_init() + else() + set(FREERTOS_KERNEL_PATH ${FREERTOS_KERNEL_PATH} PARENT_SCOPE) + set(PICO_CONFIG_HEADER_FILES ${PICO_CONFIG_HEADER_FILES} PARENT_SCOPE) + set(PICO_SDK_POST_LIST_FILES ${PICO_SDK_POST_LIST_FILES} PARENT_SCOPE) + endif() endif() endif() diff --git a/portable/ThirdParty/GCC/RP2040/README.md b/portable/ThirdParty/GCC/RP2040/README.md index e3cbdb17c2c..5a0fe4d4326 100644 --- a/portable/ThirdParty/GCC/RP2040/README.md +++ b/portable/ThirdParty/GCC/RP2040/README.md @@ -10,16 +10,28 @@ Note that a FreeRTOS SMP version of this port is also available in the FreeRTOS- ## Using this port -Copy [FreeRTOS-Kernel-import.cmake](FreeRTOS-Kernel-import.cmake) into your project, and -add: +You can copy [FreeRTOS-Kernel-import.cmake](FreeRTOS-Kernel-import.cmake) into your project, and +add the following in your `CMakeLists.txt`: ```cmake import(FreeRTOS_Kernel_import.cmake) ``` -below the usual import of `pico_sdk_import.cmake` +This will locate the FreeRTOS kernel if it is a direct sub-module of your project, or if you provide the +`FREERTOS_KERNEL_PATH` variable in your environment or via `-DFREERTOS_KERNEL_PATH=/path/to/FreeRTOS-Kernel` on the CMake command line. + +**NOTE:** If you are using version 1.3.1 or older of the Raspberry Pi Pico SDK then this line must appear before the +`pico_sdk_init()` and will cause FreeRTOS to be included/required in all RP2040 targets in your project. After this SDK +version, you can include the FreeRTOS-Kernel support later in your CMake build (possibly in a subdirectory) and the +FreeRTOS-Kernel support will only apply to those targets which explicitly include FreeRTOS support. + +As an alternative to the `import` statement above, you can just add this directory directly via thw following (with +the same placement restrictions related to the Raspberry Pi Pico SDK version above): + +```cmake +add_subdirectory(path/to/this/directory FreeRTOS-Kernel) +``` -This will find the FreeRTOS kernel if it is a direct sub-module of your project, or if you provide the `FREERTOS_KERNEL_PATH` variable in your environment or via `-DFREERTOS_KERNEL_PATH=/path/to/FreeRTOS-Kernel` on the CMake command line. 
## Advanced Configuration diff --git a/portable/ThirdParty/GCC/RP2040/library.cmake b/portable/ThirdParty/GCC/RP2040/library.cmake index a50e25be51f..075ec2b88fc 100644 --- a/portable/ThirdParty/GCC/RP2040/library.cmake +++ b/portable/ThirdParty/GCC/RP2040/library.cmake @@ -16,6 +16,11 @@ target_sources(FreeRTOS-Kernel-Core INTERFACE ) target_include_directories(FreeRTOS-Kernel-Core INTERFACE ${FREERTOS_KERNEL_PATH}/include) +if (PICO_SDK_VERSION_STRING VERSION_GREATER_EQUAL "1.3.2") + target_compile_definitions(FreeRTOS-Kernel-Core INTERFACE + PICO_CONFIG_RTOS_ADAPTER_HEADER=${CMAKE_CURRENT_LIST_DIR}/include/freertos_sdk_config.h) +endif() + add_library(FreeRTOS-Kernel INTERFACE) target_sources(FreeRTOS-Kernel INTERFACE ${CMAKE_CURRENT_LIST_DIR}/port.c From aa8325c2486cafb7a7628a785b06162a0d43a3eb Mon Sep 17 00:00:00 2001 From: Xinyu Zhang <68640626+xinyu-tfm@users.noreply.github.com> Date: Wed, 29 Jun 2022 14:52:30 +0800 Subject: [PATCH 060/164] Update to TF-M version TF-Mv1.6.0 (#517) Signed-off-by: Xinyu Zhang Change-Id: I0c15564b342873f9bd7a8240822e770950a0563e --- portable/ThirdParty/GCC/ARM_CM33_TFM/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/portable/ThirdParty/GCC/ARM_CM33_TFM/README.md b/portable/ThirdParty/GCC/ARM_CM33_TFM/README.md index 6317c040009..ba62d14a133 100644 --- a/portable/ThirdParty/GCC/ARM_CM33_TFM/README.md +++ b/portable/ThirdParty/GCC/ARM_CM33_TFM/README.md @@ -15,7 +15,7 @@ for Arm M-profile architecture. Please get the details from this [link](https:// * ```os_wrapper_freertos.c``` The implementation of APIs which are defined in ```\ns_interface\os_wrapper\mutex.h``` by tf-m-tests - (tag: TF-Mv1.5.0). The implementation is based on FreeRTOS mutex type semaphore. + (tag: TF-Mv1.5.0 & TF-Mv1.6.0). The implementation is based on FreeRTOS mutex type semaphore. # Usage notes @@ -27,7 +27,7 @@ To build a project based on this port: ### Get the TF-M source code -See the [link](https://git.trustedfirmware.org/TF-M/trusted-firmware-m.git/) to get the source code. This port is based on TF-M version **tag: TF-Mv1.5.0**. +See the [link](https://git.trustedfirmware.org/TF-M/trusted-firmware-m.git/) to get the source code. This port is supported by TF-M version **tag: TF-Mv1.5.0** & **tag: TF-Mv1.6.0**. ### Build TF-M @@ -39,7 +39,7 @@ _**Note:** ```TFM_NS_MANAGE_NSID``` must be configured as "OFF" when building TF Please copy all the files in ```freertos_kernel\portable\GCC\ARM_CM33_NTZ``` into the ```freertos_kernel\portable\ThirdParty\GCC\ARM_CM33_TFM``` folder before using this port. Note that TrustZone is enabled in this port. The TF-M runs in the Secure Side. Please call the API ```tfm_ns_interface_init()``` which is defined in ```\app\tfm_ns_interface.c``` by tf-m-tests -(tag: TF-Mv1.5.0) at the very beginning of your application. Otherwise, it will always fail when calling a TF-M service in the Nonsecure Side. +(tag: TF-Mv1.5.0 & TF-Mv1.6.0) at the very beginning of your application. Otherwise, it will always fail when calling a TF-M service in the Nonsecure Side. 
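(Editor's sketch, not part of this patch or of the TF-M project: a minimal non-secure application entry point following the note above. tfm_ns_interface_init() is the tf-m-tests API named in this README; the extern declaration below, the task prvTfmClientTask and the priorities chosen are illustrative assumptions only.)

#include "FreeRTOS.h"
#include "task.h"

/* Provided by tf-m-tests (app/tfm_ns_interface.c); normally declared in its
 * interface header - declared here only to keep the sketch self-contained. */
extern uint32_t tfm_ns_interface_init( void );

/* Hypothetical application task that calls TF-M services. */
static void prvTfmClientTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* PSA/TF-M service calls can be made here because the non-secure
         * interface was initialised before the scheduler started. */
    }
}

int main( void )
{
    /* Initialise the TF-M non-secure interface before any TF-M service is
     * used (return value check omitted for brevity). */
    ( void ) tfm_ns_interface_init();

    xTaskCreate( prvTfmClientTask, "TFM", configMINIMAL_STACK_SIZE, NULL,
                 tskIDLE_PRIORITY + 1, NULL );
    vTaskStartScheduler();

    /* Should never reach here. */
    for( ; ; )
    {
    }
}
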
### Configuration in FreeRTOS kernel From 26b31e7f62e397c59d2fd1540e76dee9db820f25 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Wed, 29 Jun 2022 08:01:00 -0700 Subject: [PATCH 061/164] Update submodule pointer of Community Supported Ports (#486) Signed-off-by: Gaurav Aggarwal Co-authored-by: Paul Bartell Co-authored-by: Joseph Julicher --- portable/ThirdParty/Community-Supported-Ports | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/portable/ThirdParty/Community-Supported-Ports b/portable/ThirdParty/Community-Supported-Ports index f0618d9e2f4..4273ca3211b 160000 --- a/portable/ThirdParty/Community-Supported-Ports +++ b/portable/ThirdParty/Community-Supported-Ports @@ -1 +1 @@ -Subproject commit f0618d9e2f4c5b0a3e472a2673a090e8ef836258 +Subproject commit 4273ca3211b99914f31518378fb590fbff064953 From 75cd3cd0c7be1f9a282e932b3871dc6110fec561 Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Wed, 29 Jun 2022 22:05:26 -0700 Subject: [PATCH 062/164] Add Cortex M7 r0p1 Errata 837070 workaround to CM4_MPU ports (#513) * Clarify Cortex M7 r0p1 errata number in r0p1 specific port. * Add ARM Cortex M7 r0p0 / r0p1 Errata 837070 workaround to CM4 MPU ports. Optionally, enable the errata workaround by defining configTARGET_ARM_CM7_r0p0 or configTARGET_ARM_CM7_r0p1 in FreeRTOSConfig.h. * Add r0p1 errata support to IAR port as well Signed-off-by: Gaurav Aggarwal * Change macro name to configENABLE_ERRATA_837070_WORKAROUND Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal --- portable/GCC/ARM_CM4_MPU/port.c | 24 +++++++++++++++++++++ portable/GCC/ARM_CM4_MPU/portmacro.h | 31 +++++++++++++++++++-------- portable/GCC/ARM_CM7/r0p1/port.c | 4 ++-- portable/IAR/ARM_CM4F_MPU/port.c | 20 +++++++++++------ portable/IAR/ARM_CM4F_MPU/portasm.s | 6 ++++++ portable/IAR/ARM_CM4F_MPU/portmacro.h | 24 +++++++++++++++------ portable/RVDS/ARM_CM4_MPU/port.c | 24 +++++++++++++++++++++ portable/RVDS/ARM_CM4_MPU/portmacro.h | 13 +++++++++++ 8 files changed, 122 insertions(+), 24 deletions(-) diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index a10d9b97bdd..ecffbfe0011 100644 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -70,6 +70,12 @@ #define portNVIC_SYS_CTRL_STATE_REG ( *( ( volatile uint32_t * ) 0xe000ed24 ) ) #define portNVIC_MEM_FAULT_ENABLE ( 1UL << 16UL ) +/* Constants used to detect Cortex-M7 r0p0 and r0p1 cores, and ensure + * that a work around is active for errata 837070. */ +#define portCPUID ( *( ( volatile uint32_t * ) 0xE000ed00 ) ) +#define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) +#define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) + /* Constants required to access and manipulate the MPU. */ #define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) #define portMPU_REGION_BASE_ADDRESS_REG ( *( ( volatile uint32_t * ) 0xe000ed9C ) ) @@ -410,6 +416,18 @@ BaseType_t xPortStartScheduler( void ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); + /* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0 + * and r0p1 cores. */ + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + configASSERT( ( portCPUID == portCORTEX_M7_r0p1_ID ) || ( portCPUID == portCORTEX_M7_r0p0_ID ) ); + #else + /* When using this port on a Cortex-M7 r0p0 or r0p1 core, define + * configENABLE_ERRATA_837070_WORKAROUND to 1 in your + * FreeRTOSConfig.h. 
*/ + configASSERT( portCPUID != portCORTEX_M7_r0p1_ID ); + configASSERT( portCPUID != portCORTEX_M7_r0p0_ID ); + #endif + #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; @@ -587,9 +605,15 @@ void xPortPendSVHandler( void ) " \n" " stmdb sp!, {r0, r3} \n" " mov r0, %0 \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif " msr basepri, r0 \n" " dsb \n" " isb \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif " bl vTaskSwitchContext \n" " mov r0, #0 \n" " msr basepri, r0 \n" diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index d77522b9e77..cef1b04be65 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -71,6 +71,7 @@ typedef unsigned long UBaseType_t; * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 #endif + /*-----------------------------------------------------------*/ /* MPU specific constants. */ @@ -346,10 +347,16 @@ portFORCE_INLINE static void vPortRaiseBASEPRI( void ) __asm volatile ( - " mov %0, %1 \n"\ - " msr basepri, %0 \n"\ - " isb \n"\ - " dsb \n"\ + " mov %0, %1 \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif + " msr basepri, %0 \n" + " isb \n" + " dsb \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif : "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" ); } @@ -362,11 +369,17 @@ portFORCE_INLINE static uint32_t ulPortRaiseBASEPRI( void ) __asm volatile ( - " mrs %0, basepri \n"\ - " mov %1, %2 \n"\ - " msr basepri, %1 \n"\ - " isb \n"\ - " dsb \n"\ + " mrs %0, basepri \n" + " mov %1, %2 \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif + " msr basepri, %1 \n" + " isb \n" + " dsb \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif : "=r" ( ulOriginalBASEPRI ), "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" ); diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index df36423a30c..130e1878d6d 100644 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -445,11 +445,11 @@ void xPortPendSVHandler( void ) " \n" " stmdb sp!, {r0, r3} \n" " mov r0, %0 \n" - " cpsid i \n"/* Errata workaround. */ + " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ " msr basepri, r0 \n" " dsb \n" " isb \n" - " cpsie i \n"/* Errata workaround. */ + " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ " bl vTaskSwitchContext \n" " mov r0, #0 \n" " msr basepri, r0 \n" diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 5e8c723da46..c9a254131d4 100644 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -98,8 +98,8 @@ #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -/* Constants used to detect a Cortex-M7 r0p1 core, which should use the ARM_CM7 - * r0p1 port. */ +/* Constants used to detect Cortex-M7 r0p0 and r0p1 cores, and ensure + * that a work around is active for errata 837070. 
*/ #define portCPUID ( *( ( volatile uint32_t * ) 0xE000ed00 ) ) #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) @@ -350,11 +350,17 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - /* This port can be used on all revisions of the Cortex-M7 core other than - * the r0p1 parts. r0p1 parts should use the port from the - * /source/portable/GCC/ARM_CM7/r0p1 directory. */ - configASSERT( portCPUID != portCORTEX_M7_r0p1_ID ); - configASSERT( portCPUID != portCORTEX_M7_r0p0_ID ); + /* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0 + * and r0p1 cores. */ + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + configASSERT( ( portCPUID == portCORTEX_M7_r0p1_ID ) || ( portCPUID == portCORTEX_M7_r0p0_ID ) ); + #else + /* When using this port on a Cortex-M7 r0p0 or r0p1 core, define + * configENABLE_ERRATA_837070_WORKAROUND to 1 in your + * FreeRTOSConfig.h. */ + configASSERT( portCPUID != portCORTEX_M7_r0p1_ID ); + configASSERT( portCPUID != portCORTEX_M7_r0p0_ID ); + #endif #if ( configASSERT_DEFINED == 1 ) { diff --git a/portable/IAR/ARM_CM4F_MPU/portasm.s b/portable/IAR/ARM_CM4F_MPU/portasm.s index 1ea6fdae741..f37912b02bd 100644 --- a/portable/IAR/ARM_CM4F_MPU/portasm.s +++ b/portable/IAR/ARM_CM4F_MPU/portasm.s @@ -70,9 +70,15 @@ xPortPendSVHandler: stmdb sp!, {r0, r3} mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif msr basepri, r0 dsb isb + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif bl vTaskSwitchContext mov r0, #0 msr basepri, r0 diff --git a/portable/IAR/ARM_CM4F_MPU/portmacro.h b/portable/IAR/ARM_CM4F_MPU/portmacro.h index ea7ecdd5d55..659fb13ea18 100644 --- a/portable/IAR/ARM_CM4F_MPU/portmacro.h +++ b/portable/IAR/ARM_CM4F_MPU/portmacro.h @@ -73,6 +73,7 @@ typedef unsigned long UBaseType_t; * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 #endif + /*-----------------------------------------------------------*/ /* MPU specific constants. */ @@ -253,12 +254,23 @@ typedef struct MPU_SETTINGS extern void vPortEnterCritical( void ); extern void vPortExitCritical( void ); -#define portDISABLE_INTERRUPTS() \ - { \ - __set_BASEPRI( configMAX_SYSCALL_INTERRUPT_PRIORITY ); \ - __DSB(); \ - __ISB(); \ - } +#if( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + #define portDISABLE_INTERRUPTS() \ + { \ + __disable_interrupt(); \ + __set_BASEPRI( configMAX_SYSCALL_INTERRUPT_PRIORITY ); \ + __DSB(); \ + __ISB(); \ + __enable_interrupt(); \ + } +#else + #define portDISABLE_INTERRUPTS() \ + { \ + __set_BASEPRI( configMAX_SYSCALL_INTERRUPT_PRIORITY ); \ + __DSB(); \ + __ISB(); \ + } +#endif #define portENABLE_INTERRUPTS() __set_BASEPRI( 0 ) #define portENTER_CRITICAL() vPortEnterCritical() diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index 4d3c4a4405b..46fe89d43b2 100644 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -59,6 +59,12 @@ #define portNVIC_SYS_CTRL_STATE_REG ( *( ( volatile uint32_t * ) 0xe000ed24 ) ) #define portNVIC_MEM_FAULT_ENABLE ( 1UL << 16UL ) +/* Constants used to detect Cortex-M7 r0p0 and r0p1 cores, and ensure + * that a work around is active for errata 837070. 
*/ +#define portCPUID ( *( ( volatile uint32_t * ) 0xE000ed00 ) ) +#define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) +#define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) + /* Constants required to access and manipulate the MPU. */ #define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) #define portMPU_REGION_BASE_ADDRESS_REG ( *( ( volatile uint32_t * ) 0xe000ed9C ) ) @@ -400,6 +406,18 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); + /* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0 + * and r0p1 cores. */ + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + configASSERT( ( portCPUID == portCORTEX_M7_r0p1_ID ) || ( portCPUID == portCORTEX_M7_r0p0_ID ) ); + #else + /* When using this port on a Cortex-M7 r0p0 or r0p1 core, define + * configENABLE_ERRATA_837070_WORKAROUND to 1 in your + * FreeRTOSConfig.h. */ + configASSERT( portCPUID != portCORTEX_M7_r0p1_ID ); + configASSERT( portCPUID != portCORTEX_M7_r0p0_ID ); + #endif + #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; @@ -591,9 +609,15 @@ __asm void xPortPendSVHandler( void ) stmdb sp !, { r0, r3 } mov r0, # configMAX_SYSCALL_INTERRUPT_PRIORITY + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif msr basepri, r0 dsb isb + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif bl vTaskSwitchContext mov r0, #0 msr basepri, r0 diff --git a/portable/RVDS/ARM_CM4_MPU/portmacro.h b/portable/RVDS/ARM_CM4_MPU/portmacro.h index 4e0ad0aeb5c..7c3d7b5624a 100644 --- a/portable/RVDS/ARM_CM4_MPU/portmacro.h +++ b/portable/RVDS/ARM_CM4_MPU/portmacro.h @@ -70,6 +70,7 @@ typedef unsigned long UBaseType_t; * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 #endif + /*-----------------------------------------------------------*/ /* MPU specific constants. */ @@ -334,9 +335,15 @@ static portFORCE_INLINE void vPortRaiseBASEPRI( void ) /* Set BASEPRI to the max syscall priority to effect a critical * section. */ /* *INDENT-OFF* */ + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsid i + #endif msr basepri, ulNewBASEPRI dsb isb + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsie i + #endif /* *INDENT-ON* */ } } @@ -366,9 +373,15 @@ static portFORCE_INLINE uint32_t ulPortRaiseBASEPRI( void ) * section. */ /* *INDENT-OFF* */ mrs ulReturn, basepri + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsid i + #endif msr basepri, ulNewBASEPRI dsb isb + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsie i + #endif /* *INDENT-ON* */ } From e6b6c694960abe3e68ef8c37ccf82df4f1253919 Mon Sep 17 00:00:00 2001 From: NomiChirps <70026509+NomiChirps@users.noreply.github.com> Date: Mon, 18 Jul 2022 16:05:30 -0700 Subject: [PATCH 063/164] RP2040: Use indirect reference for pxCurrentTCB (#525) --- portable/ThirdParty/GCC/RP2040/port.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index dbd54f243bc..f52b0d8d0a7 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -217,7 +217,7 @@ void vPortStartFirstTask( void ) #if ( configNUM_CORES == 1 ) __asm volatile ( " .syntax unified \n" - " ldr r2, =pxCurrentTCB \n"/* Obtain location of pxCurrentTCB. 
*/ + " ldr r2, pxCurrentTCBConst1 \n"/* Obtain location of pxCurrentTCB. */ " ldr r3, [r2] \n" " ldr r0, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */ " adds r0, #32 \n"/* Discard everything up to r0. */ @@ -231,6 +231,8 @@ void vPortStartFirstTask( void ) " pop {r2} \n"/* Pop and discard XPSR. */ " cpsie i \n"/* The first task has its context and interrupts can be enabled. */ " bx r3 \n"/* Finally, jump to the user defined task code. */ + " .align 4 \n" + "pxCurrentTCBConst1: .word pxCurrentTCB\n" ); #else __asm volatile ( @@ -543,7 +545,7 @@ void xPortPendSVHandler( void ) " .syntax unified \n" " mrs r0, psp \n" " \n" - " ldr r3, =pxCurrentTCB \n"/* Get the location of the current TCB. */ + " ldr r3, pxCurrentTCBConst2 \n"/* Get the location of the current TCB. */ " ldr r2, [r3] \n" " \n" " subs r0, r0, #32 \n"/* Make space for the remaining low registers. */ @@ -613,6 +615,8 @@ void xPortPendSVHandler( void ) " ldmia r0!, {r4-r7} \n"/* Pop low registers. */ " \n" " bx r3 \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" ); #else __asm volatile From 253909e67f78cf050bd1313b91cb52b7b3247be0 Mon Sep 17 00:00:00 2001 From: 0xjakob <18257824+0xjakob@users.noreply.github.com> Date: Tue, 26 Jul 2022 01:05:30 +0800 Subject: [PATCH 064/164] Posix: Removed unused signal set from port (#528) Co-authored-by: Jakob Hasse <0xjakob@users.noreply.github.com> --- portable/ThirdParty/GCC/Posix/port.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index a9347d3388f..98b12941125 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -93,7 +93,6 @@ static inline Thread_t * prvGetThreadFromTask( TaskHandle_t xTask ) /*-----------------------------------------------------------*/ static pthread_once_t hSigSetupThread = PTHREAD_ONCE_INIT; -static sigset_t xResumeSignals; static sigset_t xAllSignals; static sigset_t xSchedulerOriginalSignalMask; static pthread_t hMainThread = ( pthread_t ) NULL; @@ -527,8 +526,6 @@ static void prvSetupSignalsAndSchedulerPolicy( void ) hMainThread = pthread_self(); /* Initialise common signal masks. */ - sigemptyset( &xResumeSignals ); - sigaddset( &xResumeSignals, SIG_RESUME ); sigfillset( &xAllSignals ); /* Don't block SIGINT so this can be used to break into GDB while From 0d12faaadb956e31658d6cf5afbbe8223919d3a1 Mon Sep 17 00:00:00 2001 From: Xin Lin <47510956+xlin7799@users.noreply.github.com> Date: Thu, 28 Jul 2022 10:35:29 -0700 Subject: [PATCH 065/164] Add SBOM Generation in auto_release.yml (#524) --- .github/workflows/auto-release.yml | 28 ++++++++++++++++++++++++---- manifest.yml | 4 ++++ 2 files changed, 28 insertions(+), 4 deletions(-) create mode 100644 manifest.yml diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index 75a34cf21cd..3bf820a6f9c 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -13,7 +13,7 @@ on: default: '10.4.4' main_br_version: description: "Version String for task.h on main branch (leave empty to leave as-is)." 
- require: false + required: false default: '' jobs: @@ -44,14 +44,34 @@ jobs: path: local_kernel fetch-depth: 0 - - name: Release + - name: Configure git identity run: | - # Configure repo for push git config --global user.name ${{ github.actor }} git config --global user.email ${{ github.actor }}@users.noreply.github.com + + - name: create a new branch that references commit id + working-directory: ./local_kernel + run: git checkout -b ${{ github.event.inputs.version_number }} ${{ github.event.inputs.commit_id }} + + - name: Generate SBOM + uses: FreeRTOS/CI-CD-Github-Actions/sbom-generator@main + with: + repo_path: ./local_kernel + source_path: ./ + + - name: commit SBOM file + working-directory: ./local_kernel + run: | + git add . + git commit -m 'Update SBOM' + git push -u origin ${{ github.event.inputs.version_number }} + echo "COMMIT_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV + + - name: Release + run: | # Install deps and run pip install -r ./tools/.github/scripts/release-requirements.txt - ./tools/.github/scripts/release.py FreeRTOS --kernel-repo-path=local_kernel --kernel-commit=${{ github.event.inputs.commit_id }} --new-kernel-version=${{ github.event.inputs.version_number }} --new-kernel-main-br-version=${{ github.event.inputs.main_br_version }} + ./tools/.github/scripts/release.py FreeRTOS --kernel-repo-path=local_kernel --kernel-commit=${{ env.COMMIT_SHA }} --new-kernel-version=${{ github.event.inputs.version_number }} --new-kernel-main-br-version=${{ github.event.inputs.main_br_version }} exit $? env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/manifest.yml b/manifest.yml new file mode 100644 index 00000000000..85b5522139b --- /dev/null +++ b/manifest.yml @@ -0,0 +1,4 @@ +name : "FreeRTOS-Kernel" +version: "v10.4.6" +description: "FreeRTOS Kernel." +license: "MIT" From 1a1cb4098056b37d8129f6b00f8a7bc196d63f7a Mon Sep 17 00:00:00 2001 From: Patrick Oppenlander Date: Tue, 2 Aug 2022 20:39:58 +1000 Subject: [PATCH 066/164] add portDONT_DISCARD to pxCurrentTCB (#479) This fixes link failures with LTO: /tmp/ccJbaKaD.ltrans0.ltrans.o: in function `pxCurrentTCBConst2': /root/project/FreeRTOS/portable/GCC/ARM_CM4F/port.c:249: undefined reference to `pxCurrentTCB' /usr/lib/gcc/arm-none-eabi/11.2.0/../../../../arm-none-eabi/bin/ld: /tmp/ccJbaKaD.ltrans0.ltrans.o: in function `pxCurrentTCBConst': /root/project/FreeRTOS/portable/GCC/ARM_CM4F/port.c:443: undefined reference to `pxCurrentTCB' --- tasks.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tasks.c b/tasks.c index 98d4cc66145..32b876fed01 100644 --- a/tasks.c +++ b/tasks.c @@ -380,9 +380,9 @@ typedef tskTCB TCB_t; /*lint -save -e956 A manual analysis and inspection has been used to determine * which static variables must be declared volatile. 
*/ #if ( configNUM_CORES == 1 ) - PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; + portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; #else - PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; + portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; #define pxCurrentTCB xTaskGetCurrentTaskHandle() #endif From 60d10cf6846b99005c9283879b35a46cdc41fa00 Mon Sep 17 00:00:00 2001 From: Gavin Lambert Date: Wed, 3 Aug 2022 18:31:18 +1200 Subject: [PATCH 067/164] Implement MicroBlazeV9 stack protection (#523) * Implement stack protection for MicroBlaze (without MPU wrappers) --- portable/GCC/MicroBlazeV9/port.c | 12 +++++ portable/GCC/MicroBlazeV9/portasm.S | 69 ++++++++++++++++++++++----- portable/GCC/MicroBlazeV9/portmacro.h | 5 ++ 3 files changed, 75 insertions(+), 11 deletions(-) diff --git a/portable/GCC/MicroBlazeV9/port.c b/portable/GCC/MicroBlazeV9/port.c index 8e70db9a2d4..5f0e2616d18 100644 --- a/portable/GCC/MicroBlazeV9/port.c +++ b/portable/GCC/MicroBlazeV9/port.c @@ -105,7 +105,11 @@ static XIntc xInterruptControllerInstance; * * See the portable.h header file. */ +#if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) +StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters ) +#else StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) +#endif { extern void *_SDA2_BASE_, *_SDA_BASE_; const uint32_t ulR2 = ( uint32_t ) &_SDA2_BASE_; @@ -122,6 +126,14 @@ extern void _start1( void ); *pxTopOfStack = ( StackType_t ) 0x00000000; pxTopOfStack--; + #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + /* Store the stack limits. */ + *pxTopOfStack = (StackType_t) (pxTopOfStack + 3); + pxTopOfStack--; + *pxTopOfStack = (StackType_t) pxEndOfStack; + pxTopOfStack--; + #endif + #if( XPAR_MICROBLAZE_USE_FPU != 0 ) /* The FSR value placed in the initial task context is just 0. */ *pxTopOfStack = portINITIAL_FSR; diff --git a/portable/GCC/MicroBlazeV9/portasm.S b/portable/GCC/MicroBlazeV9/portasm.S index 6bea21f9834..937b680d8d6 100644 --- a/portable/GCC/MicroBlazeV9/portasm.S +++ b/portable/GCC/MicroBlazeV9/portasm.S @@ -33,16 +33,6 @@ #include "microblaze_exceptions_g.h" #include "xparameters.h" -/* The context is oversized to allow functions called from the ISR to write -back into the caller stack. */ -#if( XPAR_MICROBLAZE_USE_FPU != 0 ) - #define portCONTEXT_SIZE 136 - #define portMINUS_CONTEXT_SIZE -136 -#else - #define portCONTEXT_SIZE 132 - #define portMINUS_CONTEXT_SIZE -132 -#endif - /* Offsets from the stack pointer at which saved registers are placed. */ #define portR31_OFFSET 4 #define portR30_OFFSET 8 @@ -76,7 +66,31 @@ back into the caller stack. 
*/ #define portR2_OFFSET 120 #define portCRITICAL_NESTING_OFFSET 124 #define portMSR_OFFSET 128 -#define portFSR_OFFSET 132 + +#if( XPAR_MICROBLAZE_USE_FPU != 0 ) + #define portFSR_OFFSET 132 + #if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) + #define portSLR_OFFSET 136 + #define portSHR_OFFSET 140 + + #define portCONTEXT_SIZE 144 + #define portMINUS_CONTEXT_SIZE -144 + #else + #define portCONTEXT_SIZE 136 + #define portMINUS_CONTEXT_SIZE -136 + #endif +#else + #if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) + #define portSLR_OFFSET 132 + #define portSHR_OFFSET 136 + + #define portCONTEXT_SIZE 140 + #define portMINUS_CONTEXT_SIZE -140 + #else + #define portCONTEXT_SIZE 132 + #define portMINUS_CONTEXT_SIZE -132 + #endif +#endif .extern pxCurrentTCB .extern XIntc_DeviceInterruptHandler @@ -144,6 +158,14 @@ back into the caller stack. */ swi r18, r1, portFSR_OFFSET #endif +#if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) + /* Save the stack limits */ + mfs r18, rslr + swi r18, r1, portSLR_OFFSET + mfs r18, rshr + swi r18, r1, portSHR_OFFSET +#endif + /* Save the top of stack value to the TCB. */ lwi r3, r0, pxCurrentTCB sw r1, r0, r3 @@ -156,6 +178,17 @@ back into the caller stack. */ lwi r18, r0, pxCurrentTCB lw r1, r0, r18 +#if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) + /* Restore the stack limits -- must not load from r1 (Stack Pointer) + because if the address of load or store instruction is out of range, + it will trigger Stack Protection Violation exception. */ + or r18, r0, r1 + lwi r12, r18, portSLR_OFFSET + mts rslr, r12 + lwi r12, r18, portSHR_OFFSET + mts rshr, r12 +#endif + /* Restore the general registers. */ lwi r31, r1, portR31_OFFSET lwi r30, r1, portR30_OFFSET @@ -252,6 +285,13 @@ _interrupt_handler: /* Switch to the ISR stack. */ lwi r1, r0, pulISRStack +#if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) + ori r18, r0, _stack_end + mts rslr, r18 + ori r18, r0, _stack + mts rshr, r18 +#endif + /* The parameter to the interrupt handler. */ ori r5, r0, configINTERRUPT_CONTROLLER_TO_USE @@ -296,6 +336,13 @@ VPortYieldASM: /* Switch to use the ISR stack. */ lwi r1, r0, pulISRStack +#if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) + ori r18, r0, _stack_end + mts rslr, r18 + ori r18, r0, _stack + mts rshr, r18 +#endif + /* Select the next task to execute. */ bralid r15, vTaskSwitchContext or r0, r0, r0 diff --git a/portable/GCC/MicroBlazeV9/portmacro.h b/portable/GCC/MicroBlazeV9/portmacro.h index 17166b7cbf0..3df7d5c8e75 100644 --- a/portable/GCC/MicroBlazeV9/portmacro.h +++ b/portable/GCC/MicroBlazeV9/portmacro.h @@ -152,6 +152,11 @@ extern volatile uint32_t ulTaskSwitchRequested; #define portNOP() asm volatile ( "NOP" ) /*-----------------------------------------------------------*/ +#if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) +#define portHAS_STACK_OVERFLOW_CHECKING 1 +#endif +/*-----------------------------------------------------------*/ + /* Task function macros as described on the FreeRTOS.org WEB site. 
*/ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) From c1e4b266d26058cb4200e12b6201322089a2cc0d Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Tue, 28 Jun 2022 14:07:17 -0700 Subject: [PATCH 068/164] Update codecov action to v3.1.0 --- .github/workflows/unit-tests.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index b8abd9c8adc..ac398c106dc 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -35,14 +35,12 @@ jobs: make -C FreeRTOS/Test/CMock lcovhtml lcov --config-file FreeRTOS/Test/CMock/lcovrc --summary FreeRTOS/Test/CMock/build/cmock_test.info > FreeRTOS/Test/CMock/build/cmock_test_summary.txt - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v3.1.0 with: - files: FreeRTOS/Test/CMock/build/cmock_test.info - working-directory: . - root_dir: FreeRTOS/Source + files: ${{ github.workspace }}/FreeRTOS/Test/CMock/build/cmock_test.info + root_dir: ${{ github.workspace }}/FreeRTOS/Source flags: unittests fail_ci_if_error: false - path_to_write_report: coverage/codecov_report.txt verbose: false - name: Archive code coverage data uses: actions/upload-artifact@v2 From 74ceb4729ad9edec68caee43b24470213dc67c68 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Wed, 3 Aug 2022 13:45:27 -0700 Subject: [PATCH 069/164] Add vPortRemoveInterruptHandler API (#533) * Add xPortRemoveInterruptHandler API This API is added to the MicroBlazeV9 port. It enables the application writer to remove an interrupt handler. This was originally contributed in this PR - https://github.com/FreeRTOS/FreeRTOS-Kernel/pull/523 * Change API signature to return void This makes the API similar to vPortDisableInterrupt. Signed-off-by: Gaurav Aggarwal Co-authored-by: Gavin Lambert --- portable/GCC/MicroBlazeV9/port.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/portable/GCC/MicroBlazeV9/port.c b/portable/GCC/MicroBlazeV9/port.c index 5f0e2616d18..7c605e00337 100644 --- a/portable/GCC/MicroBlazeV9/port.c +++ b/portable/GCC/MicroBlazeV9/port.c @@ -327,7 +327,7 @@ int32_t lReturn; portEXIT_CRITICAL(); } - configASSERT( lReturn ); + configASSERT( lReturn == pdPASS ); } /*-----------------------------------------------------------*/ @@ -345,7 +345,7 @@ int32_t lReturn; XIntc_Disable( &xInterruptControllerInstance, ucInterruptID ); } - configASSERT( lReturn ); + configASSERT( lReturn == pdPASS ); } /*-----------------------------------------------------------*/ @@ -374,6 +374,24 @@ int32_t lReturn; } /*-----------------------------------------------------------*/ +void vPortRemoveInterruptHandler( uint8_t ucInterruptID ) +{ +int32_t lReturn; + + /* An API function is provided to remove an interrupt handler because the + interrupt controller instance variable is private to this file. 
*/ + + lReturn = prvEnsureInterruptControllerIsInitialised(); + + if( lReturn == pdPASS ) + { + XIntc_Disconnect( &xInterruptControllerInstance, ucInterruptID ); + } + + configASSERT( lReturn == pdPASS ); +} +/*-----------------------------------------------------------*/ + static int32_t prvEnsureInterruptControllerIsInitialised( void ) { static int32_t lInterruptControllerInitialised = pdFALSE; From da0210b616873b3337736ea15582e1da0af45ae3 Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Thu, 4 Aug 2022 16:41:33 +0530 Subject: [PATCH 070/164] Fix NULL pointer dereference in vPortGetHeapStats When the heap is exhausted (no free block), start and end markers are the only blocks present in the free block list: +---------------+ +-----------> NULL | | | | V | + ----- + + ----- + | | | | | | | | | | | | + ----- + + ----- + xStart pxEnd The code block which traverses the list of free blocks to calculate heap stats used a do..while loop that moved past the end marker when the heap had no free block resulting in a NULL pointer dereference. This commit changes the do..while loop to while loop thereby ensuring that we never move past the end marker. This was reported here - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/534 Signed-off-by: Gaurav Aggarwal --- portable/MemMang/heap_4.c | 4 ++-- portable/MemMang/heap_5.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 90a20c54fc5..834ba2eafc4 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -494,7 +494,7 @@ void vPortGetHeapStats( HeapStats_t * pxHeapStats ) * is initialised automatically when the first allocation is made. */ if( pxBlock != NULL ) { - do + while( pxBlock != pxEnd ) { /* Increment the number of blocks and record the largest block seen * so far. */ @@ -513,7 +513,7 @@ void vPortGetHeapStats( HeapStats_t * pxHeapStats ) /* Move to the next block in the chain until the last block is * reached. */ pxBlock = pxBlock->pxNextFreeBlock; - } while( pxBlock != pxEnd ); + } } } ( void ) xTaskResumeAll(); diff --git a/portable/MemMang/heap_5.c b/portable/MemMang/heap_5.c index 4fea255b1b1..193155a4584 100644 --- a/portable/MemMang/heap_5.c +++ b/portable/MemMang/heap_5.c @@ -544,7 +544,7 @@ void vPortGetHeapStats( HeapStats_t * pxHeapStats ) * is initialised automatically when the first allocation is made. */ if( pxBlock != NULL ) { - do + while( pxBlock != pxEnd ) { /* Increment the number of blocks and record the largest block seen * so far. */ @@ -569,7 +569,7 @@ void vPortGetHeapStats( HeapStats_t * pxHeapStats ) /* Move to the next block in the chain until the last block is * reached. */ pxBlock = pxBlock->pxNextFreeBlock; - } while( pxBlock != pxEnd ); + } } } ( void ) xTaskResumeAll(); From 74f54bb41f00aefc99e7539a8530a550f53917cb Mon Sep 17 00:00:00 2001 From: Ravishankar Bhagavandas Date: Thu, 4 Aug 2022 10:07:49 -0700 Subject: [PATCH 071/164] Change type of message buffer handle (#537) --- include/message_buffer.h | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/include/message_buffer.h b/include/message_buffer.h index 32284c4fb23..a0508bba8fd 100644 --- a/include/message_buffer.h +++ b/include/message_buffer.h @@ -80,9 +80,10 @@ * Type by which message buffers are referenced. 
For example, a call to * xMessageBufferCreate() returns an MessageBufferHandle_t variable that can * then be used as a parameter to xMessageBufferSend(), xMessageBufferReceive(), - * etc. + * etc. Message buffer is essentially built as a stream buffer hence its handle + * is also set to same type as a stream buffer handle. */ -typedef void * MessageBufferHandle_t; +typedef StreamBufferHandle_t MessageBufferHandle_t; /*-----------------------------------------------------------*/ @@ -155,11 +156,11 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferCreate( xBufferSizeBytes ) \ - ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE, NULL, NULL ) + xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE, NULL, NULL ) #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define xMessageBufferCreateWithCallback( xBufferSizeBytes, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ - ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE, pxSendCompletedCallback, pxReceiveCompletedCallback ) + xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE, pxSendCompletedCallback, pxReceiveCompletedCallback ) #endif /** @@ -237,11 +238,11 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferCreateStatic( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer ) \ - ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer, NULL, NULL ) + xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer, NULL, NULL ) #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define xMessageBufferCreateStaticWithCallback( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ - ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) + xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) #endif /** @@ -342,7 +343,7 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) \ - xStreamBufferSend( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) + xStreamBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) /** * message_buffer.h @@ -447,7 +448,7 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) \ - xStreamBufferSendFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) + xStreamBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) /** * message_buffer.h @@ -536,7 +537,7 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferReceive( xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) \ - xStreamBufferReceive( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) + xStreamBufferReceive( xMessageBuffer, 
pvRxData, xBufferLengthBytes, xTicksToWait ) /** @@ -638,7 +639,7 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) \ - xStreamBufferReceiveFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) + xStreamBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) /** * message_buffer.h @@ -659,7 +660,7 @@ typedef void * MessageBufferHandle_t; * */ #define vMessageBufferDelete( xMessageBuffer ) \ - vStreamBufferDelete( ( StreamBufferHandle_t ) xMessageBuffer ) + vStreamBufferDelete( xMessageBuffer ) /** * message_buffer.h @@ -677,7 +678,7 @@ typedef void * MessageBufferHandle_t; * pdTRUE is returned. Otherwise pdFALSE is returned. */ #define xMessageBufferIsFull( xMessageBuffer ) \ - xStreamBufferIsFull( ( StreamBufferHandle_t ) xMessageBuffer ) + xStreamBufferIsFull( xMessageBuffer ) /** * message_buffer.h @@ -694,7 +695,7 @@ typedef void * MessageBufferHandle_t; * */ #define xMessageBufferIsEmpty( xMessageBuffer ) \ - xStreamBufferIsEmpty( ( StreamBufferHandle_t ) xMessageBuffer ) + xStreamBufferIsEmpty( xMessageBuffer ) /** * message_buffer.h @@ -718,7 +719,7 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferReset( xMessageBuffer ) \ - xStreamBufferReset( ( StreamBufferHandle_t ) xMessageBuffer ) + xStreamBufferReset( xMessageBuffer ) /** @@ -741,9 +742,9 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferSpaceAvailable( xMessageBuffer ) \ - xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) + xStreamBufferSpacesAvailable( xMessageBuffer ) #define xMessageBufferSpacesAvailable( xMessageBuffer ) \ - xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) /* Corrects typo in original macro name. */ + xStreamBufferSpacesAvailable( xMessageBuffer ) /* Corrects typo in original macro name. 
*/ /** * message_buffer.h @@ -763,7 +764,7 @@ typedef void * MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferNextLengthBytes( xMessageBuffer ) \ - xStreamBufferNextMessageLengthBytes( ( StreamBufferHandle_t ) xMessageBuffer ) PRIVILEGED_FUNCTION; + xStreamBufferNextMessageLengthBytes( xMessageBuffer ) PRIVILEGED_FUNCTION; /** * message_buffer.h @@ -803,7 +804,7 @@ typedef void * MessageBufferHandle_t; * \ingroup StreamBufferManagement */ #define xMessageBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) \ - xStreamBufferSendCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + xStreamBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) /** * message_buffer.h @@ -844,7 +845,7 @@ typedef void * MessageBufferHandle_t; * \ingroup StreamBufferManagement */ #define xMessageBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) \ - xStreamBufferReceiveCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + xStreamBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) /* *INDENT-OFF* */ #if defined( __cplusplus ) From 343878540d731b3814de0f28e3bba83b51536920 Mon Sep 17 00:00:00 2001 From: Chris Copeland Date: Thu, 4 Aug 2022 11:11:31 -0700 Subject: [PATCH 072/164] Block SIG_RESUME in the main thread of the Posix port so that sigwait works as expected (#532) Co-authored-by: alfred gedeon <28123637+alfred2g@users.noreply.github.com> --- portable/ThirdParty/GCC/Posix/port.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index 98b12941125..57905bce383 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -191,13 +191,19 @@ portBASE_TYPE xPortStartScheduler( void ) * Interrupts are disabled here already. */ prvSetupTimerInterrupt(); + /* + * Block SIG_RESUME before starting any tasks so the main thread can sigwait on it. + * To sigwait on an unblocked signal is undefined. + * https://pubs.opengroup.org/onlinepubs/009604499/functions/sigwait.html + */ + sigemptyset( &xSignals ); + sigaddset( &xSignals, SIG_RESUME ); + ( void ) pthread_sigmask( SIG_BLOCK, &xSignals, NULL ); + /* Start the first task. */ vPortStartFirstTask(); /* Wait until signaled by vPortEndScheduler(). */ - sigemptyset( &xSignals ); - sigaddset( &xSignals, SIG_RESUME ); - while( xSchedulerEnd != pdTRUE ) { sigwait( &xSignals, &iSignal ); @@ -543,23 +549,10 @@ static void prvSetupSignalsAndSchedulerPolicy( void ) &xAllSignals, &xSchedulerOriginalSignalMask ); - /* SIG_RESUME is only used with sigwait() so doesn't need a - * handler. 
*/ - sigresume.sa_flags = 0; - sigresume.sa_handler = SIG_IGN; - sigfillset( &sigresume.sa_mask ); - sigtick.sa_flags = 0; sigtick.sa_handler = vPortSystemTickHandler; sigfillset( &sigtick.sa_mask ); - iRet = sigaction( SIG_RESUME, &sigresume, NULL ); - - if( iRet == -1 ) - { - prvFatalError( "sigaction", errno ); - } - iRet = sigaction( SIGALRM, &sigtick, NULL ); if( iRet == -1 ) From 4bad786cac4aafd8c94512266bdbd8f9b30bf71e Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Sun, 7 Aug 2022 22:31:47 +0530 Subject: [PATCH 073/164] Update History.txt (#535) * Update History.txt Signed-off-by: Gaurav Aggarwal --- History.txt | 182 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 130 insertions(+), 52 deletions(-) diff --git a/History.txt b/History.txt index 7f57de148fe..02ebb6a1d49 100644 --- a/History.txt +++ b/History.txt @@ -2,28 +2,106 @@ Changes between FreeRTOS V10.4.6 and TBD Documentation and download available at https://www.FreeRTOS.org/ - + Changed uxAutoReload parameter in timer functions to xAutoReload. The - type is now BaseType_t. This matches the type of pdTRUE and pdFALSE. - The new function xTimerGetAutoReload() provides the auto-reload state as - a BaseType_t. The legacy function uxTimerGetAutoReload is retained with the - original UBaseType_t return value. + + Add support for ARM Cortex-M55. + + Add vectored mode interrupt support to the RISC-V port. + + Heap improvements: + - Add a check to heap_2 to track if a memory block is allocated to + the application or not. The MSB of the size field is used for this + purpose. The same check already exists in heap_4 and heap_5. This + check prevents double free errors. + - Add a new flag configHEAP_CLEAR_MEMORY_ON_FREE to heap_2, heap_4 + and heap_5. If the flag is set in FreeRTOSConfig.h then memory freed using + vPortFree() is automatically cleared to zero. + - Add a new API pvPortCalloc to heap_2, heap_4 and heap_5 which has the same + signature as the standard library calloc function. + + Add the ability to override send and receive completed callbacks for each + instance of a stream buffer or message buffer. Earlier there could be + one send and one receive callback for all instances of stream and message + buffers. Having separate callbacks per instance allows different message + and stream buffers to be used differently - for example, some for inter core + communication and others for same core communication. + The feature can be controlled by setting the configuration option + configUSE_SB_COMPLETED_CALLBACK in FreeRTOSConfig.h. When the option is set to 1, + APIs xStreamBufferCreateWithCallback() or xStreamBufferCreateStaticWithCallback() + (and likewise APIs for message buffer) can be used to create a stream buffer + or message buffer instance with application provided callback overrides. When + the option is set to 0, then the default callbacks as defined by + sbSEND_COMPLETED() and sbRECEIVE_COMPLETED() macros are invoked. To maintain + backwards compatibility, configUSE_SB_COMPLETED_CALLBACK defaults to 0. The + functionality is currently not supported for MPU enabled ports. + + Add support to build and link FreeRTOS using CMake build system. Contributed + by @yhsb2k. + + Add support to generate Software Bill of Materials (SBOM) for every release. + + Add support for 16 MPU regions to the GCC Cortex-M33 ports. + + Add ARM Cortex-M7 r0p0/r0p1 Errata 837070 workaround to ARM CM4 MPU ports. 
+ The application writer needs to define configENABLE_ERRATA_837070_WORKAROUND + when using CM4 MPU ports on a Cortex-M7 r0p0/r0p1 core. + + Add configSYSTICK_CLOCK_HZ to Cortex-M0 ports. This is needed to support + the case when the SysTick timer is not clocked from the same source as the CPU. + + Add hardware stack protection support to MicroBlazeV9 port. This ensures that + the CPU immediately raises Stack Protection Violation exception as soon as any + task violates its stack limits. Contributed by @uecasm. + Introduce the configUSE_MINI_LIST_ITEM configuration option. When this option is set to 1, ListItem_t and MiniLitItem_t remain separate types. However, when configUSE_MINI_LIST_ITEM == 0, MiniLitItem_t and ListItem_t are both typedefs of the same struct xLIST_ITEM. This addresses some issues observed when strict-aliasing and link time optimization are enabled. To maintain backwards compatibility, configUSE_MINI_LIST_ITEM defaults to 1. - + Add the ability to override send and receive completed callbacks for each - instance of a stream buffer or message buffer. The feature can be controlled - by setting the configuration option configUSE_SB_COMPLETED_CALLBACK in - FreeRTOSConfig.h. When the option is set to 1, APIs - xStreamBufferCreateWithCallback() or xStreamBufferCreateStaticWithCallback() - (and likewise APIs from message buffer) can be used to create a stream buffer - or message buffer instance with application provided callback overrides. When - the option is set to 0, then the default callbacks as defined by - sbSEND_COMPLETED() and sbRECEIVE_COMPLETED() macros are invoked. To maintain - backwards compatibility, configUSE_SB_COMPLETED_CALLBACK defaults to 0. The - functionaility is currently not supported for MPU enabled ports. + + Simplify prvInitialiseNewTask to memset newly allocated TCB structures + to zero, and remove code that set individual structure members to zero. + + Add prototype for prvPortYieldFromISR to the POSIX port so that it builds + without any warning with -Wmissing-prototypes compiler option. + + Add top of stack and end of stack to the task info report obtained using + vTaskGetInfo(). Contributed by @shreyasbharath. + + Add a cap to the cRxLock and cTxLock members of the queue data structure. + These locks count the number items received and sent to the queue while + the queue was locked. These are later used to unblock tasks waiting on + the queue when the queue is unlocked. This PR caps the values of the + cRxLock and cTxLock to the number of tasks in the system because we cannot + unblock more tasks than there are in the system. Note that the same assert + could still be triggered is the application creates more than 127 tasks. + + Changed uxAutoReload parameter in timer functions to xAutoReload. The + type is now BaseType_t. This matches the type of pdTRUE and pdFALSE. + The new function xTimerGetAutoReload() provides the auto-reload state as + a BaseType_t. The legacy function uxTimerGetAutoReload is retained with the + original UBaseType_t return value. + + Fix support for user implementations of tickless idle that call + vTaskStepTick() with xExpectedIdleTime ticks to step. The new code + ensures xTickCount reaches xNextTaskUnblockTime inside xTaskIncrementTick() + instead of inside vTaskStepTick(). This fixes the typical case where a task + wakes up one tick late and a rare case assertion failure when xTickCount\ + rolls over. Contributed by @jefftenney. 
+ + Fix deadlock in event groups when pvPortMalloc and vPortFree functions + are protected with a mutex. Contributed by @clemenskresser. + + Fix a warning in tasks.c when compiled with -Wduplicated-branches + GCC option. Contributed by @pierrenoel-bouteville-act. + + Fix compilation error in tasks.c when configSUPPORT_DYNAMIC_ALLOCATION + is set to zero. Contributed by @rdpoor. + + Fix prvWriteMessageToBuffer() function in stream_buffer.c so that it correctly + copies length on big endian platforms too. + + Remove the need for INCLUDE_vTaskSuspend to be set to 1 + when configUSE_TICKLESS_IDLE is enabled. Contributed by @pramithkv. + + Update the RL78 IAR port to the latest version of IAR which uses the + industry standard ELF format as opposed to earlier UBROF object format. + Contributed by @felipe-iar. + + Add tick type is atomic flag when tick count is 16-bit to PIC24 port. This + allows the PIC24 family of 16 bit processors to read the tick count without + a critical section when the tick count is also 16 bits. + + Fix offset-out-of-range errors for GCC CM3/CM4 mpu ports when + Link Time Optimization is enabled. Contributed by @niniemann. + + Remove #error when RISC-V port is compiled on a 64-bit RISC-V platform. + Contributed by @cmdrf. + + Fix ullPortInterruptNesting alignment in Cortex-A53 port so that it is + 8-byte aligned. This fixes the unaligned access exception. Contributed + by @Atomar25. + + Fix Interrupt Handler Register Function and Exception Process in NiosII + Port. Contributed by @ghost. + + Change FreeRTOS IRQ Handler for Cortex-A53 SRE port to store and restore + interrupt acknowledge register. This ensures that the SRE port behavior + matches the Memory Mapped IO port. Contributed by @sviaunxp. + + Update the uncrustify config file to match the version of the uncrustify + used in the CI Action. Also, pin the version of uncrustify in CI. Contributed + by @swaldhoer. Changes between FreeRTOS V10.4.5 and FreeRTOS V10.4.6 released November 12 2021 @@ -64,43 +142,43 @@ Changes between FreeRTOS V10.4.4 and FreeRTOS V10.4.5 released September 10 2021 this change. Changes between FreeRTOS V10.4.3 and FreeRTOS V10.4.4 released May 28 2021 - + Minor performance improvements to xTaskIncrementTick() achieved by providing - macro versions of uxListRemove() and vListInsertEnd(). - + Minor refactor of timers.c that obsoletes the need for the - tmrCOMMAND_START_DONT_TRACE macro and removes the need for timers.c to - post to its own event queue. A consequence of this change is that auto- - reload timers that miss their intended next execution time will execute - again immediately rather than executing again the next time the command - queue is processed. (thanks Jeff Tenney). - + Fix a race condition in the message buffer implementation. The - underlying cause was that length and data bytes are written and read as - two distinct operations, which both modify the size of the buffer. If a - context switch occurs after adding or removing the length bytes, but - before adding or removing the data bytes, then another task may observe - the message buffer in an invalid state. - + The xTaskCreate() and xTaskCreateStatic() functions accept a task priority - as an input parameter. The priority has always been silently capped to - (configMAX_PRIORITIES - 1) should it be set to a value above that priority. - Now values above that priority will also trigger a configASSERT() failure. 
+ + Minor performance improvements to xTaskIncrementTick() achieved by providing + macro versions of uxListRemove() and vListInsertEnd(). + + Minor refactor of timers.c that obsoletes the need for the + tmrCOMMAND_START_DONT_TRACE macro and removes the need for timers.c to + post to its own event queue. A consequence of this change is that auto- + reload timers that miss their intended next execution time will execute + again immediately rather than executing again the next time the command + queue is processed. (thanks Jeff Tenney). + + Fix a race condition in the message buffer implementation. The + underlying cause was that length and data bytes are written and read as + two distinct operations, which both modify the size of the buffer. If a + context switch occurs after adding or removing the length bytes, but + before adding or removing the data bytes, then another task may observe + the message buffer in an invalid state. + + The xTaskCreate() and xTaskCreateStatic() functions accept a task priority + as an input parameter. The priority has always been silently capped to + (configMAX_PRIORITIES - 1) should it be set to a value above that priority. + Now values above that priority will also trigger a configASSERT() failure. + Replace configASSERT( pcQueueName ) in vQueueAddToRegistry with a NULL - pointer check. - + Introduce the configSTACK_ALLOCATION_FROM_SEPARATE_HEAP configuration - constant that enables the stack allocated to tasks to come from a heap other - than the heap used by other memory allocations. This enables stacks to be - placed within special regions, such as fast tightly coupled memory. - + If there is an attempt to add the same queue or semaphore handle to the - queue registry more than once then prior versions would create two separate - entries. Now if this is done the first entry is overwritten rather than - duplicated. - + Update the ESP32 port and TF-M (Trusted Firmware M)code to the latest from - their respective repositories. - + Correct a build error in the POSIX port. - + Additional minor formatting updates, including replacing tabs with spaces - in more files. - + Other minor updates include adding additional configASSERT() checks and - correcting and improving code comments. - + Go look at the smp branch to see the progress towards the Symetric - Multiprocessing Kernel. https://github.com/FreeRTOS/FreeRTOS-Kernel/tree/smp + pointer check. + + Introduce the configSTACK_ALLOCATION_FROM_SEPARATE_HEAP configuration + constant that enables the stack allocated to tasks to come from a heap other + than the heap used by other memory allocations. This enables stacks to be + placed within special regions, such as fast tightly coupled memory. + + If there is an attempt to add the same queue or semaphore handle to the + queue registry more than once then prior versions would create two separate + entries. Now if this is done the first entry is overwritten rather than + duplicated. + + Update the ESP32 port and TF-M (Trusted Firmware M)code to the latest from + their respective repositories. + + Correct a build error in the POSIX port. + + Additional minor formatting updates, including replacing tabs with spaces + in more files. + + Other minor updates include adding additional configASSERT() checks and + correcting and improving code comments. + + Go look at the smp branch to see the progress towards the Symetric + Multiprocessing Kernel. 
https://github.com/FreeRTOS/FreeRTOS-Kernel/tree/smp Changes between FreeRTOS V10.4.2 and FreeRTOS V10.4.3 released December 14 2020 From 6ac9bf291ca8b16ab69d9774857f0d7c33fb1aad Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Sun, 7 Aug 2022 22:46:11 +0530 Subject: [PATCH 074/164] Add .syntax unified to GCC assembly functions (#538) This fixes the compilation issue with XC32 compiler. It was reported here - https://forums.freertos.org/t/xc32-v4-00-error-with-building-freertos-portasm-c/14357/4 Signed-off-by: Gaurav Aggarwal Co-authored-by: Paul Bartell --- .../non_secure/portable/GCC/ARM_CM23/portasm.c | 18 ++++++++++++++++++ .../portable/GCC/ARM_CM23_NTZ/portasm.c | 14 ++++++++++++++ .../non_secure/portable/GCC/ARM_CM33/portasm.c | 18 ++++++++++++++++++ .../portable/GCC/ARM_CM33_NTZ/portasm.c | 14 ++++++++++++++ portable/GCC/ARM_CM23/non_secure/portasm.c | 18 ++++++++++++++++++ portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c | 14 ++++++++++++++ portable/GCC/ARM_CM33/non_secure/portasm.c | 18 ++++++++++++++++++ portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c | 14 ++++++++++++++ portable/GCC/ARM_CM55/non_secure/portasm.c | 18 ++++++++++++++++++ portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c | 14 ++++++++++++++ 10 files changed, 160 insertions(+) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c index b82bd63bac1..a55b48b600b 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c @@ -137,6 +137,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " movs r1, #1 \n"/* r1 = 1. */ " tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */ @@ -157,6 +159,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " movs r1, #1 \n"/* r1 = 1. */ " bics r0, r1 \n"/* Clear the bit 0. */ @@ -171,6 +175,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " movs r1, #1 \n"/* r1 = 1. */ " orrs r0, r1 \n"/* r0 = r0 | r1. */ @@ -185,6 +191,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. 
*/ @@ -206,6 +214,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, PRIMASK \n" " cpsid i \n" " bx lr \n" @@ -218,6 +228,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr PRIMASK, r0 \n" " bx lr \n" ::: "memory" @@ -413,6 +425,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " movs r0, #4 \n" " mov r1, lr \n" " tst r0, r1 \n" @@ -435,6 +449,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ ( { __asm volatile ( + " .syntax unified \n" + " \n" " svc %0 \n"/* Secure context is allocated in the supervisor call. */ " bx lr \n"/* Return. */ ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" @@ -446,6 +462,8 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */ " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c index c973b38e4fd..40141b44d08 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c @@ -132,6 +132,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " movs r1, #1 \n"/* r1 = 1. */ " tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */ @@ -152,6 +154,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " movs r1, #1 \n"/* r1 = 1. */ " bics r0, r1 \n"/* Clear the bit 0. */ @@ -166,6 +170,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " movs r1, #1 \n"/* r1 = 1. */ " orrs r0, r1 \n"/* r0 = r0 | r1. */ @@ -180,6 +186,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. 
*/ @@ -201,6 +209,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, PRIMASK \n" " cpsid i \n" " bx lr \n" @@ -213,6 +223,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr PRIMASK, r0 \n" " bx lr \n" ::: "memory" @@ -348,6 +360,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " movs r0, #4 \n" " mov r1, lr \n" " tst r0, r1 \n" diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c index 16f7e2f2433..1e4f0c98f86 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c @@ -135,6 +135,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ " ite ne \n" @@ -152,6 +154,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " bic r0, #1 \n"/* Clear the bit 0. */ " msr control, r0 \n"/* Write back the new CONTROL value. */ @@ -165,6 +169,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " orr r0, #1 \n"/* r0 = r0 | 1. */ " msr control, r0 \n"/* CONTROL = r0. */ @@ -178,6 +184,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ @@ -200,6 +208,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ @@ -215,6 +225,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr basepri, r0 \n"/* basepri = ulMask. */ " dsb \n" " isb \n" @@ -412,6 +424,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " tst lr, #4 \n" " ite eq \n" " mrseq r0, msp \n" @@ -429,6 +443,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ ( { __asm volatile ( + " .syntax unified \n" + " \n" " svc %0 \n"/* Secure context is allocated in the supervisor call. */ " bx lr \n"/* Return. */ ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" @@ -440,6 +456,8 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. 
*/ " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c index f7e4aed53ba..21b515e0f6a 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c @@ -130,6 +130,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ " ite ne \n" @@ -147,6 +149,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " bic r0, #1 \n"/* Clear the bit 0. */ " msr control, r0 \n"/* Write back the new CONTROL value. */ @@ -160,6 +164,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " orr r0, #1 \n"/* r0 = r0 | 1. */ " msr control, r0 \n"/* CONTROL = r0. */ @@ -173,6 +179,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ @@ -195,6 +203,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ @@ -210,6 +220,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr basepri, r0 \n"/* basepri = ulMask. */ " dsb \n" " isb \n" @@ -337,6 +349,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " tst lr, #4 \n" " ite eq \n" " mrseq r0, msp \n" diff --git a/portable/GCC/ARM_CM23/non_secure/portasm.c b/portable/GCC/ARM_CM23/non_secure/portasm.c index b82bd63bac1..a55b48b600b 100644 --- a/portable/GCC/ARM_CM23/non_secure/portasm.c +++ b/portable/GCC/ARM_CM23/non_secure/portasm.c @@ -137,6 +137,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " movs r1, #1 \n"/* r1 = 1. */ " tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */ @@ -157,6 +159,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " movs r1, #1 \n"/* r1 = 1. */ " bics r0, r1 \n"/* Clear the bit 0. */ @@ -171,6 +175,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " movs r1, #1 \n"/* r1 = 1. */ " orrs r0, r1 \n"/* r0 = r0 | r1. 
*/ @@ -185,6 +191,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ @@ -206,6 +214,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, PRIMASK \n" " cpsid i \n" " bx lr \n" @@ -218,6 +228,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr PRIMASK, r0 \n" " bx lr \n" ::: "memory" @@ -413,6 +425,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " movs r0, #4 \n" " mov r1, lr \n" " tst r0, r1 \n" @@ -435,6 +449,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ ( { __asm volatile ( + " .syntax unified \n" + " \n" " svc %0 \n"/* Secure context is allocated in the supervisor call. */ " bx lr \n"/* Return. */ ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" @@ -446,6 +462,8 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */ " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c index c973b38e4fd..40141b44d08 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c @@ -132,6 +132,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " movs r1, #1 \n"/* r1 = 1. */ " tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */ @@ -152,6 +154,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " movs r1, #1 \n"/* r1 = 1. */ " bics r0, r1 \n"/* Clear the bit 0. */ @@ -166,6 +170,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " movs r1, #1 \n"/* r1 = 1. */ " orrs r0, r1 \n"/* r0 = r0 | r1. */ @@ -180,6 +186,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. 
*/ @@ -201,6 +209,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, PRIMASK \n" " cpsid i \n" " bx lr \n" @@ -213,6 +223,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr PRIMASK, r0 \n" " bx lr \n" ::: "memory" @@ -348,6 +360,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " movs r0, #4 \n" " mov r1, lr \n" " tst r0, r1 \n" diff --git a/portable/GCC/ARM_CM33/non_secure/portasm.c b/portable/GCC/ARM_CM33/non_secure/portasm.c index 16f7e2f2433..1e4f0c98f86 100644 --- a/portable/GCC/ARM_CM33/non_secure/portasm.c +++ b/portable/GCC/ARM_CM33/non_secure/portasm.c @@ -135,6 +135,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ " ite ne \n" @@ -152,6 +154,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " bic r0, #1 \n"/* Clear the bit 0. */ " msr control, r0 \n"/* Write back the new CONTROL value. */ @@ -165,6 +169,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " orr r0, #1 \n"/* r0 = r0 | 1. */ " msr control, r0 \n"/* CONTROL = r0. */ @@ -178,6 +184,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ @@ -200,6 +208,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ @@ -215,6 +225,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr basepri, r0 \n"/* basepri = ulMask. */ " dsb \n" " isb \n" @@ -412,6 +424,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " tst lr, #4 \n" " ite eq \n" " mrseq r0, msp \n" @@ -429,6 +443,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ ( { __asm volatile ( + " .syntax unified \n" + " \n" " svc %0 \n"/* Secure context is allocated in the supervisor call. */ " bx lr \n"/* Return. */ ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" @@ -440,6 +456,8 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */ " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. 
*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c index f7e4aed53ba..21b515e0f6a 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c @@ -130,6 +130,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ " ite ne \n" @@ -147,6 +149,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " bic r0, #1 \n"/* Clear the bit 0. */ " msr control, r0 \n"/* Write back the new CONTROL value. */ @@ -160,6 +164,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " orr r0, #1 \n"/* r0 = r0 | 1. */ " msr control, r0 \n"/* CONTROL = r0. */ @@ -173,6 +179,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ @@ -195,6 +203,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ @@ -210,6 +220,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr basepri, r0 \n"/* basepri = ulMask. */ " dsb \n" " isb \n" @@ -337,6 +349,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " tst lr, #4 \n" " ite eq \n" " mrseq r0, msp \n" diff --git a/portable/GCC/ARM_CM55/non_secure/portasm.c b/portable/GCC/ARM_CM55/non_secure/portasm.c index 3f810056b77..e3a97d5470f 100644 --- a/portable/GCC/ARM_CM55/non_secure/portasm.c +++ b/portable/GCC/ARM_CM55/non_secure/portasm.c @@ -135,6 +135,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ " ite ne \n" @@ -152,6 +154,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " bic r0, #1 \n"/* Clear the bit 0. */ " msr control, r0 \n"/* Write back the new CONTROL value. */ @@ -165,6 +169,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " orr r0, #1 \n"/* r0 = r0 | 1. */ " msr control, r0 \n"/* CONTROL = r0. 
*/ @@ -178,6 +184,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ @@ -200,6 +208,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ @@ -215,6 +225,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr basepri, r0 \n"/* basepri = ulMask. */ " dsb \n" " isb \n" @@ -412,6 +424,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " tst lr, #4 \n" " ite eq \n" " mrseq r0, msp \n" @@ -429,6 +443,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ ( { __asm volatile ( + " .syntax unified \n" + " \n" " svc %0 \n"/* Secure context is allocated in the supervisor call. */ " bx lr \n"/* Return. */ ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" @@ -440,6 +456,8 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */ " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c index 0c2fac21992..ab6fad63a14 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c @@ -130,6 +130,8 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ " ite ne \n" @@ -147,6 +149,8 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* Read the CONTROL register. */ " bic r0, #1 \n"/* Clear the bit 0. */ " msr control, r0 \n"/* Write back the new CONTROL value. */ @@ -160,6 +164,8 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */ { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, control \n"/* r0 = CONTROL. */ " orr r0, #1 \n"/* r0 = r0 | 1. */ " msr control, r0 \n"/* CONTROL = r0. */ @@ -173,6 +179,8 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ @@ -195,6 +203,8 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT { __asm volatile ( + " .syntax unified \n" + " \n" " mrs r0, basepri \n"/* r0 = basepri. 
Return original basepri value. */ " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ @@ -210,6 +220,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att { __asm volatile ( + " .syntax unified \n" + " \n" " msr basepri, r0 \n"/* basepri = ulMask. */ " dsb \n" " isb \n" @@ -337,6 +349,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( + " .syntax unified \n" + " \n" " tst lr, #4 \n" " ite eq \n" " mrseq r0, msp \n" From 3e5adfb24354c4efc59b1d680a36563455211fca Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Mon, 8 Aug 2022 21:23:29 +0530 Subject: [PATCH 075/164] Generalize Thread Local Storage (TLS) support (#540) * Generalize Thread Local Storage (TLS) support FreeRTOS's Thread Local Storage (TLS) support used variables and functions from newlib, thereby making the TLS support specific to newlib. This commit generalizes the TLS support so that it can be used with other c-runtime libraries also. The default behavior for newlib support is still kept same for backward compatibility. The application writer would need to set configUSE_C_RUNTIME_TLS_SUPPORT to 1 in their FreeRTOSConfig.h and define the following macros to support TLS for a c-runtime library: 1. configTLS_BLOCK_TYPE - Type used to define the TLS block in TCB. 2. configINIT_TLS_BLOCK( xTLSBlock ) - Allocate and initialize memory block for the task's TLS Block. 3. configSET_TLS_BLOCK( xTLSBlock ) - Switch C-Runtime's TLS Block to point to xTLSBlock. 4. configDEINIT_TLS_BLOCK( xTLSBlock ) - Free up the memory allocated for the task's TLS Block. The following is an example to support TLS for picolibc: #define configUSE_C_RUNTIME_TLS_SUPPORT 1 #define configTLS_BLOCK_TYPE void* #define configINIT_TLS_BLOCK( xTLSBlock ) _init_tls( xTLSBlock ) #define configSET_TLS_BLOCK( xTLSBlock ) _set_tls( xTLSBlock ) #define configDEINIT_TLS_BLOCK( xTLSBlock ) Signed-off-by: Gaurav Aggarwal --- .github/lexicon.txt | 1 + include/FreeRTOS.h | 55 ++++++++++++++++++++++++++++++++++++++++-- tasks.c | 58 +++++++++++++++------------------------------ 3 files changed, 73 insertions(+), 41 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 751013504dc..68a28eb0c5f 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -3090,6 +3090,7 @@ xtimerstartfromisr xtimerstop xtimerstopfromisr xtimertaskhandle +xtlsblock xtos xtriggerlevel xtriggerlevelbytes diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index bed390534ab..b8b236fe009 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -71,9 +71,60 @@ /* Required if struct _reent is used. */ #if ( configUSE_NEWLIB_REENTRANT == 1 ) + +/* Note Newlib support has been included by popular demand, but is not + * used by the FreeRTOS maintainers themselves. FreeRTOS is not + * responsible for resulting newlib operation. User must be familiar with + * newlib and must provide system-wide implementations of the necessary + * stubs. Be warned that (at the time of writing) the current newlib design + * implements a system-wide malloc() that must be provided with locks. + * + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. 
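A brief application-level illustration of what the per-task TLS block provides, assuming the C runtime in use (newlib by default, as configured in this commit) keeps errno inside that block; the task code itself needs no FreeRTOS-specific changes:

#include <errno.h>
#include <stdio.h>

#include "FreeRTOS.h"
#include "task.h"

/* Each task owns its own TLS block, so the errno value set by a failing
 * library call in this task cannot be overwritten by calls made from
 * other tasks before it is read back. */
static void prvLoggingTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        if( fopen( "missing.cfg", "r" ) == NULL )
        {
            printf( "fopen failed, errno = %d\r\n", errno );
        }

        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}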
*/ #include + + #define configUSE_C_RUNTIME_TLS_SUPPORT 1 + + #ifndef configTLS_BLOCK_TYPE + #define configTLS_BLOCK_TYPE struct _reent + #endif + + #ifndef configINIT_TLS_BLOCK + #define configINIT_TLS_BLOCK( xTLSBlock ) _REENT_INIT_PTR( &( xTLSBlock ) ) + #endif + + #ifndef configSET_TLS_BLOCK + #define configSET_TLS_BLOCK( xTLSBlock ) _impure_ptr = &( xTLSBlock ) + #endif + + #ifndef configDEINIT_TLS_BLOCK + #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) ) + #endif +#endif /* if ( configUSE_NEWLIB_REENTRANT == 1 ) */ + +#ifndef configUSE_C_RUNTIME_TLS_SUPPORT + #define configUSE_C_RUNTIME_TLS_SUPPORT 0 #endif +#if ( ( configUSE_NEWLIB_REENTRANT == 0 ) && ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + + #ifndef configTLS_BLOCK_TYPE + #error Missing definition: configTLS_BLOCK_TYPE must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1. + #endif + + #ifndef configINIT_TLS_BLOCK + #error Missing definition: configINIT_TLS_BLOCK must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1. + #endif + + #ifndef configSET_TLS_BLOCK + #error Missing definition: configSET_TLS_BLOCK must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1. + #endif + + #ifndef configDEINIT_TLS_BLOCK + #error Missing definition: configDEINIT_TLS_BLOCK must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1. + #endif +#endif /* if ( ( configUSE_NEWLIB_REENTRANT == 0 ) && ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) */ + /* * Check all the required application specific macros have been defined. * These macros are application specific and (as downloaded) are defined @@ -1347,8 +1398,8 @@ typedef struct xSTATIC_TCB #if ( configGENERATE_RUN_TIME_STATS == 1 ) configRUN_TIME_COUNTER_TYPE ulDummy16; #endif - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - struct _reent xDummy17; + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + configTLS_BLOCK_TYPE xDummy17; #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) uint32_t ulDummy18[ configTASK_NOTIFICATION_ARRAY_ENTRIES ]; diff --git a/tasks.c b/tasks.c index 32b876fed01..24e38c7cb20 100644 --- a/tasks.c +++ b/tasks.c @@ -338,19 +338,8 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ #endif - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - - /* Allocate a Newlib reent structure that is specific to this task. - * Note Newlib support has been included by popular demand, but is not - * used by the FreeRTOS maintainers themselves. FreeRTOS is not - * responsible for resulting newlib operation. User must be familiar with - * newlib and must provide system-wide implementations of the necessary - * stubs. Be warned that (at the time of writing) the current newlib design - * implements a system-wide malloc() that must be provided with locks. - * - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - struct _reent xNewLib_reent; + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. 
*/ #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) @@ -1604,12 +1593,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif - #if ( configUSE_NEWLIB_REENTRANT == 1 ) + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { - /* Initialise this task's Newlib reent structure. - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); + /* Allocate and initialize memory for the task's TLS Block. */ + configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock ); } #endif @@ -3020,15 +3007,13 @@ void vTaskStartScheduler( void ) * starts to run. */ portDISABLE_INTERRUPTS(); - #if ( configUSE_NEWLIB_REENTRANT == 1 ) + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { - /* Switch Newlib's _impure_ptr variable to point to the _reent - * structure specific to the task that will run first. - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + /* Switch C-Runtime's TLS Block to point to the TLS + * block specific to the task that will run first. */ + configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); } - #endif /* configUSE_NEWLIB_REENTRANT */ + #endif xNextTaskUnblockTime = portMAX_DELAY; xSchedulerRunning = pdTRUE; @@ -4272,15 +4257,13 @@ BaseType_t xTaskIncrementTick( void ) } #endif - #if ( configUSE_NEWLIB_REENTRANT == 1 ) + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { - /* Switch Newlib's _impure_ptr variable to point to the _reent - * structure specific to this task. - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + /* Switch C-Runtime's TLS Block to point to the TLS + * Block specific to this task. */ + configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); } - #endif /* configUSE_NEWLIB_REENTRANT */ + #endif } } portRELEASE_ISR_LOCK(); @@ -5295,15 +5278,12 @@ static void prvCheckTasksWaitingTermination( void ) * want to allocate and clean RAM statically. */ portCLEAN_UP_TCB( pxTCB ); - /* Free up the memory allocated by the scheduler for the task. It is up - * to the task to free any memory allocated at the application level. - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - #if ( configUSE_NEWLIB_REENTRANT == 1 ) + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { - _reclaim_reent( &( pxTCB->xNewLib_reent ) ); + /* Free up the memory allocated for the task's TLS Block. 
*/ + configDEINIT_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); } - #endif /* configUSE_NEWLIB_REENTRANT */ + #endif #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) { From b936cd4ece5be2bb90deedc583e5ab6441caf481 Mon Sep 17 00:00:00 2001 From: Ravishankar Bhagavandas Date: Tue, 9 Aug 2022 09:48:44 -0700 Subject: [PATCH 076/164] Change default value of INCLUDE_xTaskGetCurrentTaskHandle (#542) --- include/FreeRTOS.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index b8b236fe009..326ae0d2635 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -260,7 +260,7 @@ #endif #ifndef INCLUDE_xTaskGetCurrentTaskHandle - #define INCLUDE_xTaskGetCurrentTaskHandle 0 + #define INCLUDE_xTaskGetCurrentTaskHandle 1 #endif #if configUSE_CO_ROUTINES != 0 From 01b99aa1188a0bd99cb4d4559a3f7966ab517d86 Mon Sep 17 00:00:00 2001 From: RichardBarry <3073890+RichardBarry@users.noreply.github.com> Date: Tue, 9 Aug 2022 10:37:24 -0700 Subject: [PATCH 077/164] Include string.h at the top of portable/GCC/ARM_CA9/port.c to prevent memset() generating a warning. (#430) Co-authored-by: none --- portable/GCC/ARM_CA9/port.c | 1 + 1 file changed, 1 insertion(+) diff --git a/portable/GCC/ARM_CA9/port.c b/portable/GCC/ARM_CA9/port.c index 78c8aea7429..6dbccf3df25 100644 --- a/portable/GCC/ARM_CA9/port.c +++ b/portable/GCC/ARM_CA9/port.c @@ -28,6 +28,7 @@ /* Standard includes. */ #include +#include /* Scheduler includes. */ #include "FreeRTOS.h" From fbfffc4e0d946ae8deeddbee20ccffde8820d3ef Mon Sep 17 00:00:00 2001 From: RichardBarry <3073890+RichardBarry@users.noreply.github.com> Date: Sun, 19 Dec 2021 11:18:54 -0800 Subject: [PATCH 078/164] Move some of the complex pre-processor guards on prvWriteNameToBuffer() to compile time checks in FreeRTOS.h. Co-authored-by: Paul Bartell --- include/FreeRTOS.h | 10 ++++++++++ tasks.c | 24 +++++++----------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 326ae0d2635..52aef7b56d7 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1065,6 +1065,16 @@ #define configSUPPORT_DYNAMIC_ALLOCATION 1 #endif +#if ( ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION != 1 ) ) + #error configUSE_STATS_FORMATTING_FUNCTIONS cannot be used without dynamic allocation, but configSUPPORT_DYNAMIC_ALLOCATION is not set to 1. +#endif + +#if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) + #if( ( configUSE_TRACE_FACILITY != 1 ) && ( configGENERATE_RUN_TIME_STATS != 1 ) ) + #error configUSE_STATS_FORMATTING_FUNCTIONS is 1 but the functions that enables are not used because neither configUSE_TRACE_FACILITY or configGENERATE_RUN_TIME_STATS are 1. Set configUSE_STATS_FORMATTING_FUNCTIONS to 0 in FreeRTOSConfig.h. 
+ #endif +#endif + #ifndef configSTACK_DEPTH_TYPE /* Defaults to uint16_t for backward compatibility, but can be overridden diff --git a/tasks.c b/tasks.c index 24e38c7cb20..bf6e9e128c7 100644 --- a/tasks.c +++ b/tasks.c @@ -601,9 +601,7 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, */ static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION; -#if ( ( ( configUSE_TRACE_FACILITY == 1 ) || ( configGENERATE_RUN_TIME_STATS == 1 ) ) && \ - ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && \ - ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) +#if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) /* * Helper function used to pad task names with spaces when printing out @@ -5912,9 +5910,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* portCRITICAL_NESTING_IN_TCB */ /*-----------------------------------------------------------*/ -#if ( ( ( configUSE_TRACE_FACILITY == 1 ) || ( configGENERATE_RUN_TIME_STATS == 1 ) ) && \ - ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && \ - ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) +#if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) static char * prvWriteNameToBuffer( char * pcBuffer, const char * pcTaskName ) @@ -5938,10 +5934,10 @@ static void prvResetNextTaskUnblockTime( void ) return &( pcBuffer[ x ] ); } -#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */ +#endif /* ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */ /*-----------------------------------------------------------*/ -#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) void vTaskList( char * pcWriteBuffer ) { @@ -6044,10 +6040,10 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */ /*----------------------------------------------------------*/ -#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) ) void vTaskGetRunTimeStats( char * pcWriteBuffer ) { @@ -6055,12 +6051,6 @@ static void prvResetNextTaskUnblockTime( void ) UBaseType_t uxArraySize, x; configRUN_TIME_COUNTER_TYPE ulTotalTime, ulStatsAsPercentage; - #if ( configUSE_TRACE_FACILITY != 1 ) - { - #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats(). 
- } - #endif - /* * PLEASE NOTE: * @@ -6171,7 +6161,7 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */ +#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */ /*-----------------------------------------------------------*/ TickType_t uxTaskResetEventItemValue( void ) From 3667d021b92119d095afde87a1b185b4a7790170 Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Tue, 9 Aug 2022 11:13:12 -0700 Subject: [PATCH 079/164] Fix formatting of FreeRTOS.h --- include/FreeRTOS.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 52aef7b56d7..368506d00dd 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1066,13 +1066,13 @@ #endif #if ( ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION != 1 ) ) - #error configUSE_STATS_FORMATTING_FUNCTIONS cannot be used without dynamic allocation, but configSUPPORT_DYNAMIC_ALLOCATION is not set to 1. + #error configUSE_STATS_FORMATTING_FUNCTIONS cannot be used without dynamic allocation, but configSUPPORT_DYNAMIC_ALLOCATION is not set to 1. #endif #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) - #if( ( configUSE_TRACE_FACILITY != 1 ) && ( configGENERATE_RUN_TIME_STATS != 1 ) ) - #error configUSE_STATS_FORMATTING_FUNCTIONS is 1 but the functions that enables are not used because neither configUSE_TRACE_FACILITY or configGENERATE_RUN_TIME_STATS are 1. Set configUSE_STATS_FORMATTING_FUNCTIONS to 0 in FreeRTOSConfig.h. - #endif + #if ( ( configUSE_TRACE_FACILITY != 1 ) && ( configGENERATE_RUN_TIME_STATS != 1 ) ) + #error configUSE_STATS_FORMATTING_FUNCTIONS is 1 but the functions that enables are not used because neither configUSE_TRACE_FACILITY or configGENERATE_RUN_TIME_STATS are 1. Set configUSE_STATS_FORMATTING_FUNCTIONS to 0 in FreeRTOSConfig.h. + #endif #endif #ifndef configSTACK_DEPTH_TYPE From 9a6713e740d97906ec58566145d6158e6ab68960 Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Tue, 9 Aug 2022 15:48:56 -0700 Subject: [PATCH 080/164] correct grammar in include/FreeRTOS.h Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> --- include/FreeRTOS.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 368506d00dd..7c2566e012b 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1071,7 +1071,7 @@ #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) #if ( ( configUSE_TRACE_FACILITY != 1 ) && ( configGENERATE_RUN_TIME_STATS != 1 ) ) - #error configUSE_STATS_FORMATTING_FUNCTIONS is 1 but the functions that enables are not used because neither configUSE_TRACE_FACILITY or configGENERATE_RUN_TIME_STATS are 1. Set configUSE_STATS_FORMATTING_FUNCTIONS to 0 in FreeRTOSConfig.h. + #error configUSE_STATS_FORMATTING_FUNCTIONS is 1 but the functions it enables are not used because neither configUSE_TRACE_FACILITY or configGENERATE_RUN_TIME_STATS are 1. Set configUSE_STATS_FORMATTING_FUNCTIONS to 0 in FreeRTOSConfig.h. #endif #endif From f2dc981ecc58f7e9467988345e97a3b084216935 Mon Sep 17 00:00:00 2001 From: Archit Gupta <71798289+archigup@users.noreply.github.com> Date: Tue, 16 Aug 2022 04:11:17 -0700 Subject: [PATCH 081/164] Fix warnings in posix port (#544) Fixes warnings about unused parameters and variables when built with `-Wall -Wextra`. 
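The fix applies the usual C idiom for parameters and variables that are required by a prototype but intentionally unused: reference them once in a cast-to-void statement so that -Wunused-parameter and -Wunused-variable have nothing to report. A stand-alone illustration of the pattern (not code taken from the port itself):

/* Builds cleanly with: gcc -Wall -Wextra -c example.c */
void vExampleSignalHandler( int iSignal )
{
    /* The parameter is dictated by the handler prototype but is not needed
     * here; the cast marks it as deliberately ignored. */
    ( void ) iSignal;
}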
--- portable/ThirdParty/GCC/Posix/port.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index 57905bce383..c18705a4949 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -325,6 +325,7 @@ portBASE_TYPE xPortSetInterruptMask( void ) void vPortClearInterruptMask( portBASE_TYPE xMask ) { + ( void ) xMask; } /*-----------------------------------------------------------*/ @@ -385,6 +386,8 @@ static void vPortSystemTickHandler( int sig ) Thread_t * pxThreadToSuspend; Thread_t * pxThreadToResume; + ( void ) sig; + /* uint64_t xExpectedTicks; */ uxCriticalNesting++; /* Signals are blocked in this signal handler. */ @@ -425,6 +428,8 @@ void vPortThreadDying( void * pxTaskToDelete, { Thread_t * pxThread = prvGetThreadFromTask( pxTaskToDelete ); + ( void ) pxPendYield; + pxThread->xDying = pdTRUE; } @@ -526,7 +531,7 @@ static void prvResumeThread( Thread_t * xThreadId ) static void prvSetupSignalsAndSchedulerPolicy( void ) { - struct sigaction sigresume, sigtick; + struct sigaction sigtick; int iRet; hMainThread = pthread_self(); From c9d3b111b670be5691b50cfd0377a050bd1a7131 Mon Sep 17 00:00:00 2001 From: Monika Singh <108652024+moninom1@users.noreply.github.com> Date: Fri, 19 Aug 2022 15:51:57 +0530 Subject: [PATCH 082/164] Add support for MISRA rule 20.7 (#546) Misra rule 20.7 requires parenthesis to all parameter names in macro definitions. The issue was reported here : https://forums.freertos.org/t/misra-20-7-compatibility/15385 --- .github/workflows/unit-tests.yml | 2 +- croutine.c | 14 ++--- include/FreeRTOS.h | 12 ++-- include/event_groups.h | 6 +- include/message_buffer.h | 20 +++---- include/mpu_wrappers.h | 6 +- include/queue.h | 2 +- include/semphr.h | 4 +- include/stream_buffer.h | 8 +-- include/task.h | 6 +- stream_buffer.c | 94 ++++++++++++++++---------------- tasks.c | 2 +- 12 files changed, 88 insertions(+), 88 deletions(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index ac398c106dc..0039e5b1d53 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -24,7 +24,7 @@ jobs: - name: Install packages run: | - sudo apt-get install lcov cflow ruby doxygen build-essential + sudo apt-get install lcov cflow ruby doxygen build-essential unifdef - name: Run Unit Tests with ENABLE_SANITIZER=1 run: | make -C FreeRTOS/Test/CMock clean diff --git a/croutine.c b/croutine.c index 27ef603c253..a17475aa6e5 100644 --- a/croutine.c +++ b/croutine.c @@ -65,13 +65,13 @@ * This macro accesses the co-routine ready lists and therefore must not be * used from within an ISR. 
*/ - #define prvAddCoRoutineToReadyQueue( pxCRCB ) \ - { \ - if( pxCRCB->uxPriority > uxTopCoRoutineReadyPriority ) \ - { \ - uxTopCoRoutineReadyPriority = pxCRCB->uxPriority; \ - } \ - vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ pxCRCB->uxPriority ] ), &( pxCRCB->xGenericListItem ) ); \ + #define prvAddCoRoutineToReadyQueue( pxCRCB ) \ + { \ + if( ( pxCRCB )->uxPriority > uxTopCoRoutineReadyPriority ) \ + { \ + uxTopCoRoutineReadyPriority = ( pxCRCB )->uxPriority; \ + } \ + vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ ( pxCRCB )->uxPriority ] ), &( ( pxCRCB )->xGenericListItem ) ); \ } /* diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 7c2566e012b..fa7dac347ba 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -464,11 +464,11 @@ #endif #ifndef portCLEAR_INTERRUPT_MASK_FROM_ISR - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedStatusValue ) ( void ) uxSavedStatusValue + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedStatusValue ) ( void ) ( uxSavedStatusValue ) #endif #ifndef portCLEAN_UP_TCB - #define portCLEAN_UP_TCB( pxTCB ) ( void ) pxTCB + #define portCLEAN_UP_TCB( pxTCB ) ( void ) ( pxTCB ) #endif #ifndef portPRE_TASK_DELETE_HOOK @@ -476,7 +476,7 @@ #endif #ifndef portSETUP_TCB - #define portSETUP_TCB( pxTCB ) ( void ) pxTCB + #define portSETUP_TCB( pxTCB ) ( void ) ( pxTCB ) #endif #ifndef configQUEUE_REGISTRY_SIZE @@ -788,7 +788,7 @@ #endif #ifndef traceEVENT_GROUP_SYNC_END - #define traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred ) ( void ) xTimeoutOccurred + #define traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred ) ( void ) ( xTimeoutOccurred ) #endif #ifndef traceEVENT_GROUP_WAIT_BITS_BLOCK @@ -796,7 +796,7 @@ #endif #ifndef traceEVENT_GROUP_WAIT_BITS_END - #define traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred ) ( void ) xTimeoutOccurred + #define traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred ) ( void ) ( xTimeoutOccurred ) #endif #ifndef traceEVENT_GROUP_CLEAR_BITS @@ -1139,7 +1139,7 @@ #define portTICK_TYPE_ENTER_CRITICAL() #define portTICK_TYPE_EXIT_CRITICAL() #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() 0 - #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) ( void ) x + #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) ( void ) ( x ) #endif /* if ( portTICK_TYPE_IS_ATOMIC == 0 ) */ /* Definitions to allow backward compatibility with FreeRTOS versions prior to diff --git a/include/event_groups.h b/include/event_groups.h index 7a6980583e2..949ddd9143f 100644 --- a/include/event_groups.h +++ b/include/event_groups.h @@ -423,7 +423,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; #else #define xEventGroupClearBitsFromISR( xEventGroup, uxBitsToClear ) \ - xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ) + xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) ( xEventGroup ), ( uint32_t ) ( uxBitsToClear ), NULL ) #endif /** @@ -579,7 +579,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; #else #define xEventGroupSetBitsFromISR( xEventGroup, uxBitsToSet, pxHigherPriorityTaskWoken ) \ - xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ) + 
xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) ( xEventGroup ), ( uint32_t ) ( uxBitsToSet ), ( pxHigherPriorityTaskWoken ) ) #endif /** @@ -728,7 +728,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, * \defgroup xEventGroupGetBits xEventGroupGetBits * \ingroup EventGroup */ -#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( xEventGroup, 0 ) +#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( ( xEventGroup ), 0 ) /** * event_groups.h diff --git a/include/message_buffer.h b/include/message_buffer.h index a0508bba8fd..32e0514387f 100644 --- a/include/message_buffer.h +++ b/include/message_buffer.h @@ -156,11 +156,11 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferCreate( xBufferSizeBytes ) \ - xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE, NULL, NULL ) + xStreamBufferGenericCreate( ( xBufferSizeBytes ), ( size_t ) 0, pdTRUE, NULL, NULL ) #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define xMessageBufferCreateWithCallback( xBufferSizeBytes, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ - xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE, pxSendCompletedCallback, pxReceiveCompletedCallback ) + xStreamBufferGenericCreate( ( xBufferSizeBytes ), ( size_t ) 0, pdTRUE, ( pxSendCompletedCallback ), ( pxReceiveCompletedCallback ) ) #endif /** @@ -238,11 +238,11 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferCreateStatic( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer ) \ - xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer, NULL, NULL ) + xStreamBufferGenericCreateStatic( ( xBufferSizeBytes ), 0, pdTRUE, ( pucMessageBufferStorageArea ), ( pxStaticMessageBuffer ), NULL, NULL ) #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define xMessageBufferCreateStaticWithCallback( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ - xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) + xStreamBufferGenericCreateStatic( ( xBufferSizeBytes ), 0, pdTRUE, ( pucMessageBufferStorageArea ), ( pxStaticMessageBuffer ), ( pxSendCompletedCallback ), ( pxReceiveCompletedCallback ) ) #endif /** @@ -343,7 +343,7 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) \ - xStreamBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) + xStreamBufferSend( ( xMessageBuffer ), ( pvTxData ), ( xDataLengthBytes ), ( xTicksToWait ) ) /** * message_buffer.h @@ -448,7 +448,7 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) \ - xStreamBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) + xStreamBufferSendFromISR( ( xMessageBuffer ), ( pvTxData ), ( xDataLengthBytes ), ( pxHigherPriorityTaskWoken ) ) /** * message_buffer.h @@ -537,7 +537,7 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferReceive( xMessageBuffer, pvRxData, 
xBufferLengthBytes, xTicksToWait ) \ - xStreamBufferReceive( xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) + xStreamBufferReceive( ( xMessageBuffer ), ( pvRxData ), ( xBufferLengthBytes ), ( xTicksToWait ) ) /** @@ -639,7 +639,7 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; * \ingroup MessageBufferManagement */ #define xMessageBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) \ - xStreamBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) + xStreamBufferReceiveFromISR( ( xMessageBuffer ), ( pvRxData ), ( xBufferLengthBytes ), ( pxHigherPriorityTaskWoken ) ) /** * message_buffer.h @@ -804,7 +804,7 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; * \ingroup StreamBufferManagement */ #define xMessageBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) \ - xStreamBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) + xStreamBufferSendCompletedFromISR( ( xMessageBuffer ), ( pxHigherPriorityTaskWoken ) ) /** * message_buffer.h @@ -845,7 +845,7 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; * \ingroup StreamBufferManagement */ #define xMessageBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) \ - xStreamBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) + xStreamBufferReceiveCompletedFromISR( ( xMessageBuffer ), ( pxHigherPriorityTaskWoken ) ) /* *INDENT-OFF* */ #if defined( __cplusplus ) diff --git a/include/mpu_wrappers.h b/include/mpu_wrappers.h index 6da06e7f556..750d0307db6 100644 --- a/include/mpu_wrappers.h +++ b/include/mpu_wrappers.h @@ -182,10 +182,10 @@ #define xPortRaisePrivilege( xRunningPrivileged ) \ { \ /* Check whether the processor is already privileged. */ \ - xRunningPrivileged = portIS_PRIVILEGED(); \ + ( xRunningPrivileged ) = portIS_PRIVILEGED(); \ \ /* If the processor is not already privileged, raise privilege. */ \ - if( xRunningPrivileged == pdFALSE ) \ + if( ( xRunningPrivileged ) == pdFALSE ) \ { \ portRAISE_PRIVILEGE(); \ } \ @@ -197,7 +197,7 @@ */ #define vPortResetPrivilege( xRunningPrivileged ) \ { \ - if( xRunningPrivileged == pdFALSE ) \ + if( ( xRunningPrivileged ) == pdFALSE ) \ { \ portRESET_PRIVILEGE(); \ } \ diff --git a/include/queue.h b/include/queue.h index bb59638159b..426de468fe0 100644 --- a/include/queue.h +++ b/include/queue.h @@ -1478,7 +1478,7 @@ BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION; * Reset a queue back to its original empty state. The return value is now * obsolete and is always set to pdPASS. 
*/ -#define xQueueReset( xQueue ) xQueueGenericReset( xQueue, pdFALSE ) +#define xQueueReset( xQueue ) xQueueGenericReset( ( xQueue ), pdFALSE ) /* * The registry is provided as a means for kernel aware debuggers to diff --git a/include/semphr.h b/include/semphr.h index 5226bcb5957..b8575b90e9d 100644 --- a/include/semphr.h +++ b/include/semphr.h @@ -225,7 +225,7 @@ typedef QueueHandle_t SemaphoreHandle_t; * \ingroup Semaphores */ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) - #define xSemaphoreCreateBinaryStatic( pxStaticSemaphore ) xQueueGenericCreateStatic( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticSemaphore, queueQUEUE_TYPE_BINARY_SEMAPHORE ) + #define xSemaphoreCreateBinaryStatic( pxStaticSemaphore ) xQueueGenericCreateStatic( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, ( pxStaticSemaphore ), queueQUEUE_TYPE_BINARY_SEMAPHORE ) #endif /* configSUPPORT_STATIC_ALLOCATION */ /** @@ -940,7 +940,7 @@ typedef QueueHandle_t SemaphoreHandle_t; * \ingroup Semaphores */ #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) ) - #define xSemaphoreCreateRecursiveMutexStatic( pxStaticSemaphore ) xQueueCreateMutexStatic( queueQUEUE_TYPE_RECURSIVE_MUTEX, pxStaticSemaphore ) + #define xSemaphoreCreateRecursiveMutexStatic( pxStaticSemaphore ) xQueueCreateMutexStatic( queueQUEUE_TYPE_RECURSIVE_MUTEX, ( pxStaticSemaphore ) ) #endif /* configSUPPORT_STATIC_ALLOCATION */ /** diff --git a/include/stream_buffer.h b/include/stream_buffer.h index a3a263d95c3..7ab409d89ef 100644 --- a/include/stream_buffer.h +++ b/include/stream_buffer.h @@ -155,11 +155,11 @@ typedef void (* StreamBufferCallbackFunction_t)( StreamBufferHandle_t xStreamBuf */ #define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) \ - xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, NULL, NULL ) + xStreamBufferGenericCreate( ( xBufferSizeBytes ), ( xTriggerLevelBytes ), pdFALSE, NULL, NULL ) #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define xStreamBufferCreateWithCallback( xBufferSizeBytes, xTriggerLevelBytes, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ - xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pxSendCompletedCallback, pxReceiveCompletedCallback ) + xStreamBufferGenericCreate( ( xBufferSizeBytes ), ( xTriggerLevelBytes ), pdFALSE, ( pxSendCompletedCallback ), ( pxReceiveCompletedCallback ) ) #endif /** @@ -253,11 +253,11 @@ typedef void (* StreamBufferCallbackFunction_t)( StreamBufferHandle_t xStreamBuf */ #define xStreamBufferCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer ) \ - xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer, NULL, NULL ) + xStreamBufferGenericCreateStatic( ( xBufferSizeBytes ), ( xTriggerLevelBytes ), pdFALSE, ( pucStreamBufferStorageArea ), ( pxStaticStreamBuffer ), NULL, NULL ) #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define xStreamBufferCreateStaticWithCallback( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) \ - xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ) + xStreamBufferGenericCreateStatic( ( xBufferSizeBytes ), ( xTriggerLevelBytes ), pdFALSE, ( pucStreamBufferStorageArea ), ( pxStaticStreamBuffer ), 
( pxSendCompletedCallback ), ( pxReceiveCompletedCallback ) ) #endif /** diff --git a/include/task.h b/include/task.h index 18b454abac5..961f9cc1952 100644 --- a/include/task.h +++ b/include/task.h @@ -907,9 +907,9 @@ BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, * vTaskDelayUntil() is the older version of xTaskDelayUntil() and does not * return a value. */ -#define vTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ) \ - { \ - ( void ) xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ); \ +#define vTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ) \ + { \ + ( void ) xTaskDelayUntil( ( pxPreviousWakeTime ), ( xTimeIncrement ) ); \ } diff --git a/stream_buffer.c b/stream_buffer.c index e8a8839834f..b81f072fb1a 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -77,19 +77,19 @@ * invoke the callback else use the receive complete macro which is provided by default for all instances. */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) - #define prvRECEIVE_COMPLETED( pxStreamBuffer ) \ - { \ - if( pxStreamBuffer->pxReceiveCompletedCallback != NULL ) \ - { \ - pxStreamBuffer->pxReceiveCompletedCallback( pxStreamBuffer, pdFALSE, NULL ); \ - } \ - else \ - { \ - sbRECEIVE_COMPLETED( pxStreamBuffer ); \ - } \ + #define prvRECEIVE_COMPLETED( pxStreamBuffer ) \ + { \ + if( ( pxStreamBuffer )->pxReceiveCompletedCallback != NULL ) \ + { \ + ( pxStreamBuffer )->pxReceiveCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \ + } \ + else \ + { \ + sbRECEIVE_COMPLETED( ( pxStreamBuffer ) ); \ + } \ } #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ - #define prvRECEIVE_COMPLETED( pxStreamBuffer ) sbRECEIVE_COMPLETED( pxStreamBuffer ) + #define prvRECEIVE_COMPLETED( pxStreamBuffer ) sbRECEIVE_COMPLETED( ( pxStreamBuffer ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #ifndef sbRECEIVE_COMPLETED_FROM_ISR @@ -105,7 +105,7 @@ ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, \ ( uint32_t ) 0, \ eNoAction, \ - pxHigherPriorityTaskWoken ); \ + ( pxHigherPriorityTaskWoken ) ); \ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ @@ -114,21 +114,21 @@ #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) - #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ - pxHigherPriorityTaskWoken ) \ - { \ - if( pxStreamBuffer->pxReceiveCompletedCallback != NULL ) \ - { \ - pxStreamBuffer->pxReceiveCompletedCallback( pxStreamBuffer, pdTRUE, pxHigherPriorityTaskWoken ); \ - } \ - else \ - { \ - sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); \ - } \ + #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ + pxHigherPriorityTaskWoken ) \ + { \ + if( ( pxStreamBuffer )->pxReceiveCompletedCallback != NULL ) \ + { \ + ( pxStreamBuffer )->pxReceiveCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \ + } \ + else \ + { \ + sbRECEIVE_COMPLETED_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \ + } \ } #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) + sbRECEIVE_COMPLETED_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ /* If the user has not provided an application specific Tx notification macro, @@ -154,19 +154,19 @@ * invoke the callback else use the send complete macro which is provided by default for all 
instances. */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) - #define prvSEND_COMPLETED( pxStreamBuffer ) \ - { \ - if( pxStreamBuffer->pxSendCompletedCallback != NULL ) \ - { \ - pxStreamBuffer->pxSendCompletedCallback( pxStreamBuffer, pdFALSE, NULL ); \ - } \ - else \ - { \ - sbSEND_COMPLETED( pxStreamBuffer ); \ - } \ + #define prvSEND_COMPLETED( pxStreamBuffer ) \ + { \ + if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \ + { \ + pxStreamBuffer->pxSendCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \ + } \ + else \ + { \ + sbSEND_COMPLETED( ( pxStreamBuffer ) ); \ + } \ } #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ - #define prvSEND_COMPLETED( pxStreamBuffer ) sbSEND_COMPLETED( pxStreamBuffer ) + #define prvSEND_COMPLETED( pxStreamBuffer ) sbSEND_COMPLETED( ( pxStreamBuffer ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ @@ -182,7 +182,7 @@ ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, \ ( uint32_t ) 0, \ eNoAction, \ - pxHigherPriorityTaskWoken ); \ + ( pxHigherPriorityTaskWoken ) ); \ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ @@ -192,20 +192,20 @@ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) - #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - { \ - if( pxStreamBuffer->pxSendCompletedCallback != NULL ) \ - { \ - pxStreamBuffer->pxSendCompletedCallback( pxStreamBuffer, pdTRUE, pxHigherPriorityTaskWoken ); \ - } \ - else \ - { \ - sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); \ - } \ + #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ + { \ + if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \ + { \ + ( pxStreamBuffer )->pxSendCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \ + } \ + else \ + { \ + sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \ + } \ } #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) + sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ /*lint -restore (9026) */ diff --git a/tasks.c b/tasks.c index bf6e9e128c7..3d7c4bd35ec 100644 --- a/tasks.c +++ b/tasks.c @@ -175,7 +175,7 @@ * architecture being used. */ /* A port optimised version is provided. Call the port defined macros. */ - #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( ( uxPriority ), uxTopReadyPriority ) /*-----------------------------------------------------------*/ From 3e6dbc75beca15211cfd2fbc74bde9672ec9471a Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Mon, 22 Aug 2022 20:58:07 +0530 Subject: [PATCH 083/164] Add FreeRTOS config directory to include dirs (#548) This allows the application write to set FREERTOS_CONFIG_FILE_DIRECTORY to whichever directory the FreeRTOSConfig.h file exists in. 
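For example, a consuming application's CMakeLists.txt could point the kernel at its config header before the port's library.cmake is processed. This is only a minimal sketch; the project layout and the config path shown are illustrative and not part of this change:

    # Sketch: FreeRTOSConfig.h is assumed to live in <application>/config.
    set(FREERTOS_CONFIG_FILE_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}/config"
        CACHE STRING "Directory containing FreeRTOSConfig.h")
    # The kernel interface library then exports this directory on its public
    # include path, so any target linking FreeRTOS-Kernel can resolve
    # #include "FreeRTOSConfig.h" without extra include flags.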
This was reported here - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/545 Signed-off-by: Gaurav Aggarwal --- portable/ThirdParty/GCC/RP2040/library.cmake | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/portable/ThirdParty/GCC/RP2040/library.cmake b/portable/ThirdParty/GCC/RP2040/library.cmake index 075ec2b88fc..6b125fab8b8 100644 --- a/portable/ThirdParty/GCC/RP2040/library.cmake +++ b/portable/ThirdParty/GCC/RP2040/library.cmake @@ -27,7 +27,8 @@ target_sources(FreeRTOS-Kernel INTERFACE ) target_include_directories(FreeRTOS-Kernel INTERFACE - ${CMAKE_CURRENT_LIST_DIR}/include) + ${CMAKE_CURRENT_LIST_DIR}/include + ${FREERTOS_CONFIG_FILE_DIRECTORY}) target_link_libraries(FreeRTOS-Kernel INTERFACE FreeRTOS-Kernel-Core From d23da2ecc2f8a0692763633ce14dda34c950f506 Mon Sep 17 00:00:00 2001 From: Octaviarius Date: Tue, 30 Aug 2022 23:27:39 +0300 Subject: [PATCH 084/164] [Fix] Type for pointers operations (#550) * fix type for pointers operations in some places: size_t -> portPOINTER_SIZE_TYPE * fix pointer arithmetics * fix xAddress type --- portable/MemMang/heap_2.c | 2 +- portable/MemMang/heap_4.c | 18 +++++++++--------- portable/MemMang/heap_5.c | 10 +++++----- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/portable/MemMang/heap_2.c b/portable/MemMang/heap_2.c index 0700a0d7a97..a22163eacf7 100644 --- a/portable/MemMang/heap_2.c +++ b/portable/MemMang/heap_2.c @@ -353,7 +353,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* To start with there is a single free block that is sized to take up the * entire heap space. */ - pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock = ( BlockLink_t * ) pucAlignedHeap; pxFirstFreeBlock->xBlockSize = configADJUSTED_HEAP_SIZE; pxFirstFreeBlock->pxNextFreeBlock = &xEnd; } diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 834ba2eafc4..f61162a64c9 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -377,17 +377,17 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ { BlockLink_t * pxFirstFreeBlock; uint8_t * pucAlignedHeap; - size_t uxAddress; + portPOINTER_SIZE_TYPE uxAddress; size_t xTotalHeapSize = configTOTAL_HEAP_SIZE; /* Ensure the heap starts on a correctly aligned boundary. */ - uxAddress = ( size_t ) ucHeap; + uxAddress = ( portPOINTER_SIZE_TYPE ) ucHeap; if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) { uxAddress += ( portBYTE_ALIGNMENT - 1 ); - uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); - xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap; } pucAlignedHeap = ( uint8_t * ) uxAddress; @@ -399,17 +399,17 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* pxEnd is used to mark the end of the list of free blocks and is inserted * at the end of the heap space. */ - uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize; uxAddress -= xHeapStructSize; - uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); - pxEnd = ( void * ) uxAddress; + uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); + pxEnd = ( BlockLink_t * ) uxAddress; pxEnd->xBlockSize = 0; pxEnd->pxNextFreeBlock = NULL; /* To start with there is a single free block that is sized to take up the * entire heap space, minus the space taken by pxEnd. 
*/ - pxFirstFreeBlock = ( void * ) pucAlignedHeap; - pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock = ( BlockLink_t * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlock ); pxFirstFreeBlock->pxNextFreeBlock = pxEnd; /* Only one block exists - and it covers the entire usable heap space. */ diff --git a/portable/MemMang/heap_5.c b/portable/MemMang/heap_5.c index 193155a4584..c5d29d90850 100644 --- a/portable/MemMang/heap_5.c +++ b/portable/MemMang/heap_5.c @@ -442,10 +442,10 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) { BlockLink_t * pxFirstFreeBlockInRegion = NULL, * pxPreviousFreeBlock; - size_t xAlignedHeap; + portPOINTER_SIZE_TYPE xAlignedHeap; size_t xTotalRegionSize, xTotalHeapSize = 0; BaseType_t xDefinedRegions = 0; - size_t xAddress; + portPOINTER_SIZE_TYPE xAddress; const HeapRegion_t * pxHeapRegion; /* Can only call once! */ @@ -458,7 +458,7 @@ void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) xTotalRegionSize = pxHeapRegion->xSizeInBytes; /* Ensure the heap region starts on a correctly aligned boundary. */ - xAddress = ( size_t ) pxHeapRegion->pucStartAddress; + xAddress = ( portPOINTER_SIZE_TYPE ) pxHeapRegion->pucStartAddress; if( ( xAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) { @@ -466,7 +466,7 @@ void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) xAddress &= ~portBYTE_ALIGNMENT_MASK; /* Adjust the size for the bytes lost to alignment. */ - xTotalRegionSize -= xAddress - ( size_t ) pxHeapRegion->pucStartAddress; + xTotalRegionSize -= ( size_t ) ( xAddress - ( portPOINTER_SIZE_TYPE ) pxHeapRegion->pucStartAddress ); } xAlignedHeap = xAddress; @@ -506,7 +506,7 @@ void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) * sized to take up the entire heap region minus the space taken by the * free block structure. */ pxFirstFreeBlockInRegion = ( BlockLink_t * ) xAlignedHeap; - pxFirstFreeBlockInRegion->xBlockSize = xAddress - ( size_t ) pxFirstFreeBlockInRegion; + pxFirstFreeBlockInRegion->xBlockSize = ( size_t ) ( xAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlockInRegion ); pxFirstFreeBlockInRegion->pxNextFreeBlock = pxEnd; /* If this is not the first region that makes up the entire heap space From 204f08ccc490dc7069fd021af41edf26870b547e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Lu=C5=BEn=C3=BD?= Date: Wed, 31 Aug 2022 01:49:37 +0200 Subject: [PATCH 085/164] RISC-V: Add support for RV32E extension in GCC port (#543) Co-authored-by: Joseph Julicher --- portable/GCC/RISC-V/portASM.S | 43 ++++++++++++++++++------------- portable/GCC/RISC-V/portContext.h | 25 ++++++++++++++---- portable/GCC/RISC-V/portmacro.h | 6 ++++- 3 files changed, 50 insertions(+), 24 deletions(-) diff --git a/portable/GCC/RISC-V/portASM.S b/portable/GCC/RISC-V/portASM.S index 58dfaf968b7..57e98208243 100644 --- a/portable/GCC/RISC-V/portASM.S +++ b/portable/GCC/RISC-V/portASM.S @@ -98,35 +98,35 @@ definitions. */ /*-----------------------------------------------------------*/ .macro portUPDATE_MTIMER_COMPARE_REGISTER - load_x t0, pullMachineTimerCompareRegister /* Load address of compare register into t0. */ - load_x t1, pullNextTime /* Load the address of ullNextTime into t1. */ + load_x a0, pullMachineTimerCompareRegister /* Load address of compare register into a0. 
*/ + load_x a1, pullNextTime /* Load the address of ullNextTime into a1. */ #if( __riscv_xlen == 32 ) /* Update the 64-bit mtimer compare match value in two 32-bit writes. */ - li t4, -1 - lw t2, 0(t1) /* Load the low word of ullNextTime into t2. */ - lw t3, 4(t1) /* Load the high word of ullNextTime into t3. */ - sw t4, 0(t0) /* Low word no smaller than old value to start with - will be overwritten below. */ - sw t3, 4(t0) /* Store high word of ullNextTime into compare register. No smaller than new value. */ - sw t2, 0(t0) /* Store low word of ullNextTime into compare register. */ + li a4, -1 + lw a2, 0(a1) /* Load the low word of ullNextTime into a2. */ + lw a3, 4(a1) /* Load the high word of ullNextTime into a3. */ + sw a4, 0(a0) /* Low word no smaller than old value to start with - will be overwritten below. */ + sw a3, 4(a0) /* Store high word of ullNextTime into compare register. No smaller than new value. */ + sw a2, 0(a0) /* Store low word of ullNextTime into compare register. */ lw t0, uxTimerIncrementsForOneTick /* Load the value of ullTimerIncrementForOneTick into t0 (could this be optimized by storing in an array next to pullNextTime?). */ - add t4, t0, t2 /* Add the low word of ullNextTime to the timer increments for one tick (assumes timer increment for one tick fits in 32-bits). */ - sltu t5, t4, t2 /* See if the sum of low words overflowed (what about the zero case?). */ - add t6, t3, t5 /* Add overflow to high word of ullNextTime. */ - sw t4, 0(t1) /* Store new low word of ullNextTime. */ - sw t6, 4(t1) /* Store new high word of ullNextTime. */ + add a4, t0, a2 /* Add the low word of ullNextTime to the timer increments for one tick (assumes timer increment for one tick fits in 32-bits). */ + sltu t1, a4, a2 /* See if the sum of low words overflowed (what about the zero case?). */ + add t2, a3, t1 /* Add overflow to high word of ullNextTime. */ + sw a4, 0(a1) /* Store new low word of ullNextTime. */ + sw t2, 4(a1) /* Store new high word of ullNextTime. */ #endif /* __riscv_xlen == 32 */ #if( __riscv_xlen == 64 ) /* Update the 64-bit mtimer compare match value. */ - ld t2, 0(t1) /* Load ullNextTime into t2. */ - sd t2, 0(t0) /* Store ullNextTime into compare register. */ + ld t2, 0(a1) /* Load ullNextTime into t2. */ + sd t2, 0(a0) /* Store ullNextTime into compare register. */ ld t0, uxTimerIncrementsForOneTick /* Load the value of ullTimerIncrementForOneTick into t0 (could this be optimized by storing in an array next to pullNextTime?). */ add t4, t0, t2 /* Add ullNextTime to the timer increments for one tick. */ - sd t4, 0(t1) /* Store ullNextTime. */ + sd t4, 0(a1) /* Store ullNextTime. */ #endif /* __riscv_xlen == 64 */ .endm @@ -206,7 +206,12 @@ pxPortInitialiseStack: store_x t0, 0(a0) /* mstatus onto the stack. */ addi a0, a0, -portWORD_SIZE /* Space for critical nesting count. */ store_x x0, 0(a0) /* Critical nesting count starts at 0 for every task. */ + +#ifdef __riscv_32e + addi a0, a0, -(6 * portWORD_SIZE) /* Space for registers x11-x15. */ +#else addi a0, a0, -(22 * portWORD_SIZE) /* Space for registers x11-x31. */ +#endif store_x a2, 0(a0) /* Task parameters (pvParameters parameter) goes into register X10/a0 on the stack. */ addi a0, a0, -(6 * portWORD_SIZE) /* Space for registers x5-x9. 
*/ load_x t0, xTaskReturnAddress @@ -241,6 +246,7 @@ xPortStartFirstTask: load_x x13, 10 * portWORD_SIZE( sp ) /* a3 */ load_x x14, 11 * portWORD_SIZE( sp ) /* a4 */ load_x x15, 12 * portWORD_SIZE( sp ) /* a5 */ +#ifndef __riscv_32e load_x x16, 13 * portWORD_SIZE( sp ) /* a6 */ load_x x17, 14 * portWORD_SIZE( sp ) /* a7 */ load_x x18, 15 * portWORD_SIZE( sp ) /* s2 */ @@ -257,12 +263,13 @@ xPortStartFirstTask: load_x x29, 26 * portWORD_SIZE( sp ) /* t4 */ load_x x30, 27 * portWORD_SIZE( sp ) /* t5 */ load_x x31, 28 * portWORD_SIZE( sp ) /* t6 */ +#endif - load_x x5, 29 * portWORD_SIZE( sp ) /* Obtain xCriticalNesting value for this task from task's stack. */ + load_x x5, portCRITICAL_NESTING_OFFSET * portWORD_SIZE( sp ) /* Obtain xCriticalNesting value for this task from task's stack. */ load_x x6, pxCriticalNesting /* Load the address of xCriticalNesting into x6. */ store_x x5, 0( x6 ) /* Restore the critical nesting value for this task. */ - load_x x5, 30 * portWORD_SIZE( sp ) /* Initial mstatus into x5 (t0). */ + load_x x5, portMSTATUS_OFFSET * portWORD_SIZE( sp ) /* Initial mstatus into x5 (t0). */ addi x5, x5, 0x08 /* Set MIE bit so the first task starts with interrupts enabled - required as returns with ret not eret. */ csrrw x0, mstatus, x5 /* Interrupts enabled from here! */ diff --git a/portable/GCC/RISC-V/portContext.h b/portable/GCC/RISC-V/portContext.h index 757654b8bf3..c7eeeec6683 100644 --- a/portable/GCC/RISC-V/portContext.h +++ b/portable/GCC/RISC-V/portContext.h @@ -48,7 +48,16 @@ * portasmRESTORE_ADDITIONAL_REGISTERS macros - which can be defined in a chip * specific version of freertos_risc_v_chip_specific_extensions.h. See the * notes at the top of portASM.S file. */ -#define portCONTEXT_SIZE ( 31 * portWORD_SIZE ) +#ifdef __riscv_32e + #define portCONTEXT_SIZE ( 15 * portWORD_SIZE ) + #define portCRITICAL_NESTING_OFFSET 13 + #define portMSTATUS_OFFSET 14 +#else + #define portCONTEXT_SIZE ( 31 * portWORD_SIZE ) + #define portCRITICAL_NESTING_OFFSET 29 + #define portMSTATUS_OFFSET 30 +#endif + /*-----------------------------------------------------------*/ .extern pxCurrentTCB @@ -71,6 +80,7 @@ store_x x13, 10 * portWORD_SIZE( sp ) store_x x14, 11 * portWORD_SIZE( sp ) store_x x15, 12 * portWORD_SIZE( sp ) +#ifndef __riscv_32e store_x x16, 13 * portWORD_SIZE( sp ) store_x x17, 14 * portWORD_SIZE( sp ) store_x x18, 15 * portWORD_SIZE( sp ) @@ -87,12 +97,15 @@ store_x x29, 26 * portWORD_SIZE( sp ) store_x x30, 27 * portWORD_SIZE( sp ) store_x x31, 28 * portWORD_SIZE( sp ) +#endif load_x t0, xCriticalNesting /* Load the value of xCriticalNesting into t0. */ - store_x t0, 29 * portWORD_SIZE( sp ) /* Store the critical nesting value to the stack. */ + store_x t0, portCRITICAL_NESTING_OFFSET * portWORD_SIZE( sp ) /* Store the critical nesting value to the stack. */ + csrr t0, mstatus /* Required for MPIE bit. */ - store_x t0, 30 * portWORD_SIZE( sp ) + store_x t0, portMSTATUS_OFFSET * portWORD_SIZE( sp ) + portasmSAVE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to save any registers unique to the RISC-V implementation. */ @@ -133,10 +146,10 @@ portasmRESTORE_ADDITIONAL_REGISTERS /* Load mstatus with the interrupt enable bits used by the task. */ - load_x t0, 30 * portWORD_SIZE( sp ) + load_x t0, portMSTATUS_OFFSET * portWORD_SIZE( sp ) csrw mstatus, t0 /* Required for MPIE bit. */ - load_x t0, 29 * portWORD_SIZE( sp ) /* Obtain xCriticalNesting value for this task from task's stack. 
*/ + load_x t0, portCRITICAL_NESTING_OFFSET * portWORD_SIZE( sp ) /* Obtain xCriticalNesting value for this task from task's stack. */ load_x t1, pxCriticalNesting /* Load the address of xCriticalNesting into t1. */ store_x t0, 0( t1 ) /* Restore the critical nesting value for this task. */ @@ -152,6 +165,7 @@ load_x x13, 10 * portWORD_SIZE( sp ) load_x x14, 11 * portWORD_SIZE( sp ) load_x x15, 12 * portWORD_SIZE( sp ) +#ifndef __riscv_32e load_x x16, 13 * portWORD_SIZE( sp ) load_x x17, 14 * portWORD_SIZE( sp ) load_x x18, 15 * portWORD_SIZE( sp ) @@ -168,6 +182,7 @@ load_x x29, 26 * portWORD_SIZE( sp ) load_x x30, 27 * portWORD_SIZE( sp ) load_x x31, 28 * portWORD_SIZE( sp ) +#endif addi sp, sp, portCONTEXT_SIZE mret diff --git a/portable/GCC/RISC-V/portmacro.h b/portable/GCC/RISC-V/portmacro.h index 837978299d5..76b7c920b74 100644 --- a/portable/GCC/RISC-V/portmacro.h +++ b/portable/GCC/RISC-V/portmacro.h @@ -80,7 +80,11 @@ typedef portUBASE_TYPE TickType_t; /* Architecture specifics. */ #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) -#define portBYTE_ALIGNMENT 16 +#ifdef __riscv_32e + #define portBYTE_ALIGNMENT 8 /* RV32E uses RISC-V EABI with reduced stack alignment requirements */ +#else + #define portBYTE_ALIGNMENT 16 +#endif /*-----------------------------------------------------------*/ /* Scheduler utilities. */ From 00ca1742c2d0f2c25997931d9545cc0148b45c34 Mon Sep 17 00:00:00 2001 From: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> Date: Thu, 1 Sep 2022 13:23:02 -0700 Subject: [PATCH 086/164] Added checks for index in ThreadLocalStorage APIs (#552) Added checks for ( xIndex >= 0 ) in ThreadLocalStorage APIs --- tasks.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tasks.c b/tasks.c index 3d7c4bd35ec..5277eb92d4c 100644 --- a/tasks.c +++ b/tasks.c @@ -4907,7 +4907,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) { TCB_t * pxTCB; - if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + if( ( xIndex >= 0 ) && + ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) ) { pxTCB = prvGetTCBFromHandle( xTaskToSet ); configASSERT( pxTCB != NULL ); @@ -4926,7 +4927,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) void * pvReturn = NULL; TCB_t * pxTCB; - if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + if( ( xIndex >= 0 ) && + ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) ) { pxTCB = prvGetTCBFromHandle( xTaskToQuery ); pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ]; From 274e30455098d982bafe43c3bb4e22060d3a250e Mon Sep 17 00:00:00 2001 From: newbrain <17814222+newbrain@users.noreply.github.com> Date: Thu, 8 Sep 2022 19:33:41 +0200 Subject: [PATCH 087/164] Update of three badly terminated macro definitions (#555) * Update of three badly terminated macro definitions - vTaskDelayUntil() to conform to usual pattern do { ... } while(0) - vTaskNotifyGiveFromISR() and - vTaskGenericNotifyGiveFromISR() to remove extra terminating semicolons - This PR addresses issues #553 and #554 * Adjust formatting of task.h Co-authored-by: Paul Bartell --- include/task.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/task.h b/include/task.h index 961f9cc1952..e681b018d0e 100644 --- a/include/task.h +++ b/include/task.h @@ -908,9 +908,9 @@ BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, * return a value. 
*/ #define vTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ) \ - { \ + do { \ ( void ) xTaskDelayUntil( ( pxPreviousWakeTime ), ( xTimeIncrement ) ); \ - } + } while( 0 ) /** @@ -2726,9 +2726,9 @@ void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; #define vTaskNotifyGiveFromISR( xTaskToNotify, pxHigherPriorityTaskWoken ) \ - vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( pxHigherPriorityTaskWoken ) ); + vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( pxHigherPriorityTaskWoken ) ) #define vTaskNotifyGiveIndexedFromISR( xTaskToNotify, uxIndexToNotify, pxHigherPriorityTaskWoken ) \ - vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( pxHigherPriorityTaskWoken ) ); + vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( pxHigherPriorityTaskWoken ) ) /** * task. h From d76951b060d885a27dcb02d8bce66edaac2aaabf Mon Sep 17 00:00:00 2001 From: Gabor Toth Date: Tue, 13 Sep 2022 18:38:25 +0200 Subject: [PATCH 088/164] M85 support (#556) * Extend support to Arm Cortex-M85 Signed-off-by: Gabor Toth Change-Id: I679ba8e193638126b683b651513f08df445f9fe6 * Add generated Cortex-M85 support files Signed-off-by: Gabor Toth Change-Id: Ib329d88623c2936ffe3e9a24f5d6e07655e4e5c8 * Extend Trusted Firmware M port Extend Trusted Firmware M port to Cortex-M23, Cortex-M55 and Cortex-M85. Signed-off-by: Gabor Toth Change-Id: If8f1081acfd04e547b3227579e70e355a6adffe3 * Re-run copy_files.py script Signed-off-by: Gaurav Aggarwal Signed-off-by: Gabor Toth Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal --- portable/ARMv8M/ReadMe.txt | 6 +- portable/ARMv8M/copy_files.py | 22 +- portable/ARMv8M/non_secure/ReadMe.txt | 6 +- .../portable/GCC/ARM_CM85/portmacro.h | 71 + .../portable/IAR/ARM_CM85/portmacro.h | 83 ++ portable/ARMv8M/secure/ReadMe.txt | 6 +- portable/GCC/ARM_CM85/non_secure/port.c | 1203 +++++++++++++++++ portable/GCC/ARM_CM85/non_secure/portasm.c | 470 +++++++ portable/GCC/ARM_CM85/non_secure/portasm.h | 114 ++ portable/GCC/ARM_CM85/non_secure/portmacro.h | 71 + .../GCC/ARM_CM85/non_secure/portmacrocommon.h | 311 +++++ portable/GCC/ARM_CM85/secure/secure_context.c | 351 +++++ portable/GCC/ARM_CM85/secure/secure_context.h | 135 ++ .../GCC/ARM_CM85/secure/secure_context_port.c | 97 ++ portable/GCC/ARM_CM85/secure/secure_heap.c | 451 ++++++ portable/GCC/ARM_CM85/secure/secure_heap.h | 66 + portable/GCC/ARM_CM85/secure/secure_init.c | 106 ++ portable/GCC/ARM_CM85/secure/secure_init.h | 54 + .../GCC/ARM_CM85/secure/secure_port_macros.h | 140 ++ portable/GCC/ARM_CM85_NTZ/non_secure/port.c | 1203 +++++++++++++++++ .../GCC/ARM_CM85_NTZ/non_secure/portasm.c | 365 +++++ .../GCC/ARM_CM85_NTZ/non_secure/portasm.h | 114 ++ .../GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 71 + .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 311 +++++ portable/IAR/ARM_CM85/non_secure/port.c | 1203 +++++++++++++++++ portable/IAR/ARM_CM85/non_secure/portasm.h | 114 ++ portable/IAR/ARM_CM85/non_secure/portasm.s | 353 +++++ portable/IAR/ARM_CM85/non_secure/portmacro.h | 83 ++ .../IAR/ARM_CM85/non_secure/portmacrocommon.h | 311 +++++ portable/IAR/ARM_CM85/secure/secure_context.c | 351 +++++ portable/IAR/ARM_CM85/secure/secure_context.h | 135 ++ .../ARM_CM85/secure/secure_context_port_asm.s | 86 ++ portable/IAR/ARM_CM85/secure/secure_heap.c | 
451 ++++++ portable/IAR/ARM_CM85/secure/secure_heap.h | 66 + portable/IAR/ARM_CM85/secure/secure_init.c | 106 ++ portable/IAR/ARM_CM85/secure/secure_init.h | 54 + .../IAR/ARM_CM85/secure/secure_port_macros.h | 140 ++ portable/IAR/ARM_CM85_NTZ/non_secure/port.c | 1203 +++++++++++++++++ .../IAR/ARM_CM85_NTZ/non_secure/portasm.h | 114 ++ .../IAR/ARM_CM85_NTZ/non_secure/portasm.s | 262 ++++ .../IAR/ARM_CM85_NTZ/non_secure/portmacro.h | 83 ++ .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 311 +++++ .../GCC/{ARM_CM33_TFM => ARM_TFM}/README.md | 13 +- .../os_wrapper_freertos.c | 0 44 files changed, 11249 insertions(+), 17 deletions(-) create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h create mode 100644 portable/GCC/ARM_CM85/non_secure/port.c create mode 100644 portable/GCC/ARM_CM85/non_secure/portasm.c create mode 100644 portable/GCC/ARM_CM85/non_secure/portasm.h create mode 100644 portable/GCC/ARM_CM85/non_secure/portmacro.h create mode 100644 portable/GCC/ARM_CM85/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM85/secure/secure_context.c create mode 100644 portable/GCC/ARM_CM85/secure/secure_context.h create mode 100644 portable/GCC/ARM_CM85/secure/secure_context_port.c create mode 100644 portable/GCC/ARM_CM85/secure/secure_heap.c create mode 100644 portable/GCC/ARM_CM85/secure/secure_heap.h create mode 100644 portable/GCC/ARM_CM85/secure/secure_init.c create mode 100644 portable/GCC/ARM_CM85/secure/secure_init.h create mode 100644 portable/GCC/ARM_CM85/secure/secure_port_macros.h create mode 100644 portable/GCC/ARM_CM85_NTZ/non_secure/port.c create mode 100644 portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c create mode 100644 portable/GCC/ARM_CM85_NTZ/non_secure/portasm.h create mode 100644 portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h create mode 100644 portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM85/non_secure/port.c create mode 100644 portable/IAR/ARM_CM85/non_secure/portasm.h create mode 100644 portable/IAR/ARM_CM85/non_secure/portasm.s create mode 100644 portable/IAR/ARM_CM85/non_secure/portmacro.h create mode 100644 portable/IAR/ARM_CM85/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM85/secure/secure_context.c create mode 100644 portable/IAR/ARM_CM85/secure/secure_context.h create mode 100644 portable/IAR/ARM_CM85/secure/secure_context_port_asm.s create mode 100644 portable/IAR/ARM_CM85/secure/secure_heap.c create mode 100644 portable/IAR/ARM_CM85/secure/secure_heap.h create mode 100644 portable/IAR/ARM_CM85/secure/secure_init.c create mode 100644 portable/IAR/ARM_CM85/secure/secure_init.h create mode 100644 portable/IAR/ARM_CM85/secure/secure_port_macros.h create mode 100644 portable/IAR/ARM_CM85_NTZ/non_secure/port.c create mode 100644 portable/IAR/ARM_CM85_NTZ/non_secure/portasm.h create mode 100644 portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s create mode 100644 portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h create mode 100644 portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h rename portable/ThirdParty/GCC/{ARM_CM33_TFM => ARM_TFM}/README.md (77%) rename portable/ThirdParty/GCC/{ARM_CM33_TFM => ARM_TFM}/os_wrapper_freertos.c (100%) diff --git a/portable/ARMv8M/ReadMe.txt b/portable/ARMv8M/ReadMe.txt index 47194bfd6c6..c0db1455792 100644 --- a/portable/ARMv8M/ReadMe.txt +++ b/portable/ARMv8M/ReadMe.txt @@ -1,11 +1,11 @@ This directory tree contains the master copy of the FreeeRTOS 
Armv8-M and Armv8.1-M ports. Do not use the files located here! These file are copied into separate -FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NNN directories prior to each +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85]_NNN directories prior to each FreeRTOS release. If your Armv8-M and Armv8.1-M application uses TrustZone then use the files from the -FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55] directories. +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85] directories. If your Armv8-M and Armv8.1-M application does not use TrustZone then use the files from -the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NTZ directories. +the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85]_NTZ directories. diff --git a/portable/ARMv8M/copy_files.py b/portable/ARMv8M/copy_files.py index 5c2cca7fa1b..9c96cdb1f10 100644 --- a/portable/ARMv8M/copy_files.py +++ b/portable/ARMv8M/copy_files.py @@ -33,8 +33,8 @@ _FREERTOS_PORTABLE_DIRECTORY_ = os.path.dirname(_THIS_FILE_DIRECTORY_) _COMPILERS_ = ['GCC', 'IAR'] -_ARCH_NS_ = ['ARM_CM55', 'ARM_CM55_NTZ', 'ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] -_ARCH_S_ = ['ARM_CM55', 'ARM_CM33', 'ARM_CM23'] +_ARCH_NS_ = ['ARM_CM85', 'ARM_CM85_NTZ', 'ARM_CM55', 'ARM_CM55_NTZ', 'ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] +_ARCH_S_ = ['ARM_CM85', 'ARM_CM55', 'ARM_CM33', 'ARM_CM23'] # Files to be compiled in the Secure Project _SECURE_COMMON_FILE_PATHS_ = [ @@ -48,12 +48,14 @@ 'GCC':{ 'ARM_CM23':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM23')], 'ARM_CM33':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], - 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')] + 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM85':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')] }, 'IAR':{ 'ARM_CM23':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM23')], 'ARM_CM33':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], - 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')] + 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM85':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')] } } @@ -71,7 +73,11 @@ 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), - os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')] + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], + 'ARM_CM85' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM85', 'portmacro.h')], + 'ARM_CM85_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM85', 'portmacro.h')] }, 'IAR':{ 'ARM_CM23' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM23')], @@ -81,7 +87,11 @@ 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), - os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')] + os.path.join('non_secure', 'portable', 'IAR', 
'ARM_CM55', 'portmacro.h')], + 'ARM_CM85' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM85', 'portmacro.h')], + 'ARM_CM85_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM85', 'portmacro.h')] }, } diff --git a/portable/ARMv8M/non_secure/ReadMe.txt b/portable/ARMv8M/non_secure/ReadMe.txt index 777e8921828..68ff904f366 100644 --- a/portable/ARMv8M/non_secure/ReadMe.txt +++ b/portable/ARMv8M/non_secure/ReadMe.txt @@ -1,11 +1,11 @@ This directory tree contains the master copy of the FreeRTOS Armv8-M and Armv8.1-M ports. Do not use the files located here! These file are copied into separate -FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NNN directories prior to +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85]_NNN directories prior to each FreeRTOS release. If your Armv8-M/Armv8.1-M application uses TrustZone then use the files from the -FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55] directories. +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85] directories. If your Armv8-M/Armv8.1-M application does not use TrustZone then use the files from -the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NTZ directories. +the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85]_NTZ directories. diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h new file mode 100644 index 00000000000..830fa2c1379 --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -0,0 +1,71 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. 
+ *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M85" +#define portDONT_DISCARD __attribute__( ( used ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h new file mode 100644 index 00000000000..cfaae813eac --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h @@ -0,0 +1,83 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M85" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. 
+ */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/secure/ReadMe.txt b/portable/ARMv8M/secure/ReadMe.txt index 777e8921828..68ff904f366 100644 --- a/portable/ARMv8M/secure/ReadMe.txt +++ b/portable/ARMv8M/secure/ReadMe.txt @@ -1,11 +1,11 @@ This directory tree contains the master copy of the FreeRTOS Armv8-M and Armv8.1-M ports. Do not use the files located here! These file are copied into separate -FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NNN directories prior to +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85]_NNN directories prior to each FreeRTOS release. If your Armv8-M/Armv8.1-M application uses TrustZone then use the files from the -FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55] directories. +FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85] directories. If your Armv8-M/Armv8.1-M application does not use TrustZone then use the files from -the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55]_NTZ directories. +the FreeRTOS/Source/portable/[compiler]/ARM_CM[23|33|55|85]_NTZ directories. diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c new file mode 100644 index 00000000000..d746923dfee --- /dev/null +++ b/portable/GCC/ARM_CM85/non_secure/port.c @@ -0,0 +1,1203 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. 
*/ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + +/* The way the SysTick is clocked is not modified in case it is not the + * same a the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. 
+ */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. 
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. 
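+ *
+ * Note: this port uses the standard CMSIS handler names (SysTick_Handler,
+ * PendSV_Handler and SVC_Handler), so with a typical CMSIS based startup file
+ * the handlers implemented in port.c and portasm.c are installed in the
+ * vector table automatically; with any other startup code the application's
+ * vector table must reference these functions explicitly.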
+ */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for is + * accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code will execute part way + * through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be un-suspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + * this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + * periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + * above. 
*/ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation + * contains its own wait for interrupt or wait for event + * instruction, and so wfi should not be executed again. However, + * the original expected idle time variable must remain unmodified, + * so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will + * increase any slippage between the time maintained by the RTOS and + * calendar time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. + * Again, the time the SysTick is stopped for is accounted for as + * best it can be, but using the tickless mode will inevitably + * result in some tiny drift of the time maintained by the kernel + * with respect to calendar time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + * been set back to the current reload value (the reload back being + * correct for the entire expected idle time) or if the SysTick is + * yet to count to zero (in which case an interrupt other than the + * SysTick must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + * reloaded with ulReloadValue. Reset the + * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + * period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is + * stepped forward by one less than the time spent waiting. 
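+                 * For example, if xExpectedIdleTime was 10 ticks then the
+                 * pending tick accounts for one of them, so
+                 * ulCompleteTickPeriods is set to 9 below and
+                 * vTaskStepTick( 9 ) is called before the pending tick
+                 * interrupt adds the final tick once interrupts are
+                 * re-enabled.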
*/ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + * Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + * value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. 
*/ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. 
*/ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
*/ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
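+         *
+         * Note the region base and limit addresses programmed below are
+         * truncated to the 32 byte MPU granularity by
+         * portMPU_RBAR_ADDRESS_MASK and portMPU_RLAR_ADDRESS_MASK; as a
+         * purely illustrative example, a stack starting at 0x20001234 results
+         * in an RBAR address field of 0x20001220
+         * ( 0x20001234 & 0xffffffe0 ).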
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
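+                 * (The xRegions array itself comes from the application,
+                 * normally via the xRegions member of TaskParameters_t passed
+                 * to xTaskCreateRestricted(). As a purely illustrative
+                 * sketch, an entry describing a 512 byte read-only,
+                 * never-executable buffer ucBuffer - a hypothetical
+                 * application variable - would be
+                 * { ucBuffer, 512, tskMPU_REGION_READ_ONLY | tskMPU_REGION_EXECUTE_NEVER }.)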
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portasm.c b/portable/GCC/ARM_CM85/non_secure/portasm.c new file mode 100644 index 00000000000..1e4f0c98f86 --- /dev/null +++ b/portable/GCC/ARM_CM85/non_secure/portasm.c @@ -0,0 +1,470 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Portasm includes. */ +#include "portasm.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. 
*/ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ + " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r4, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #4 \n"/* r4 = 4. */ + " str r4, [r2] \n"/* Program RNR = 4. */ + " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #8 \n"/* r4 = 8. */ + " str r4, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #12 \n"/* r4 = 12. */ + " str r4, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ + " ldr r5, xSecureContextConst2 \n" + " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " msr control, r3 \n"/* Set this task's CONTROL value. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r4 \n"/* Finally, branch to EXC_RETURN. */ + #else /* configENABLE_MPU */ + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n"/* Finally, branch to EXC_RETURN. 
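+                                             * The EXC_RETURN value branched to here is the
+                                             * portINITIAL_EXC_RETURN word that pxPortInitialiseStack()
+                                             * placed on the task's fake stack frame, so the hardware
+                                             * completes the exception return by unstacking R0-R3, R12,
+                                             * LR, PC and xPSR from the process stack and the first task
+                                             * starts executing in Thread mode.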
*/ + #endif /* configENABLE_MPU */ + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + "xSecureContextConst2: .word xSecureContext \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst2: .word 0xe000ed94 \n" + "xMAIR0Const2: .word 0xe000edc0 \n" + "xRNRConst2: .word 0xe000ed98 \n" + "xRBARConst2: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + " ite ne \n" + " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + " bx lr \n"/* Return. */ + " \n" + " .align 4 \n" + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* Read the CONTROL register. */ + " bic r0, #1 \n"/* Clear the bit 0. */ + " msr control, r0 \n"/* Write back the new CONTROL value. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vResetPrivilege( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " orr r0, #1 \n"/* r0 = r0 | 1. */ + " msr control, r0 \n"/* CONTROL = r0. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ + " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ + " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */ + " cpsie i \n"/* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc %0 \n"/* System call to start the first task. */ + " nop \n" + " \n" + " .align 4 \n" + "xVTORConst: .word 0xe000ed08 \n" + ::"i" ( portSVC_START_SCHEDULER ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ + " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " msr basepri, r0 \n"/* basepri = ulMask. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. 
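+                         * ulSetInterruptMask() and vClearInterruptMask() back the
+                         * portDISABLE_INTERRUPTS() / portENABLE_INTERRUPTS() and
+                         * portSET/CLEAR_INTERRUPT_MASK_FROM_ISR() macros in this port's
+                         * portmacro.h. A sketch of the usual pairing (illustrative only):
+                         *
+                         *     uint32_t ulMask = ulSetInterruptMask();
+                         *     ... code that must not be interrupted by kernel aware interrupts ...
+                         *     vClearInterruptMask( ulMask );
+                         *
+                         * Interrupts with a priority above
+                         * configMAX_SYSCALL_INTERRUPT_PRIORITY (a numerically lower value)
+                         * are never masked by these functions and must not call FreeRTOS
+                         * API functions.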
*/ + ::: "memory" + ); +} +/*-----------------------------------------------------------*/ + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " mrs r2, psp \n"/* Read PSP in r2. */ + " \n" + " cbz r0, save_ns_context \n"/* No secure context to save. */ + " push {r0-r2, r14} \n" + " bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r3} \n"/* LR is now in r3. */ + " mov lr, r3 \n"/* LR = r3. */ + " lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ + #if ( configENABLE_MPU == 1 ) + " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r3, control \n"/* r3 = CONTROL. */ + " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + " b select_next_task \n" + " \n" + " save_ns_context: \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r3, control \n"/* r3 = CONTROL. */ + " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ + " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ + " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. 
*/ + #else /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n"/* r0 = 0. */ + " msr basepri, r0 \n"/* Enable interrupts. */ + " \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r3] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ + " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ + " str r4, [r3] \n"/* Program MAIR0. */ + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #4 \n"/* r4 = 4. */ + " str r4, [r3] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #8 \n"/* r4 = 8. */ + " str r4, [r3] \n"/* Program RNR = 8. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #12 \n"/* r4 = 12. */ + " str r4, [r3] \n"/* Program RNR = 12. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r3] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. 
*/ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + #else /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + #endif /* configENABLE_MPU */ + " \n" + " restore_ns_context: \n" + " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. 
*/ + " bx lr \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + "xSecureContextConst: .word xSecureContext \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst: .word 0xe000ed94 \n" + "xMAIR0Const: .word 0xe000edc0 \n" + "xRNRConst: .word 0xe000ed98 \n" + "xRBARConst: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} +/*-----------------------------------------------------------*/ + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " tst lr, #4 \n" + " ite eq \n" + " mrseq r0, msp \n" + " mrsne r0, psp \n" + " ldr r1, svchandler_address_const \n" + " bx r1 \n" + " \n" + " .align 4 \n" + "svchandler_address_const: .word vPortSVCHandler_C \n" + ); +} +/*-----------------------------------------------------------*/ + +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " svc %0 \n"/* Secure context is allocated in the supervisor call. */ + " bx lr \n"/* Return. */ + ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ + " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */ + " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */ + " it ne \n" + " svcne %0 \n"/* Secure context is freed in the supervisor call. */ + " bx lr \n"/* Return. */ + ::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portasm.h b/portable/GCC/ARM_CM85/non_secure/portasm.h new file mode 100644 index 00000000000..b37f6d16a56 --- /dev/null +++ b/portable/GCC/ARM_CM85/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. 
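vPortAllocateSecureContext() above does nothing more than raise portSVC_ALLOCATE_SECURE_CONTEXT; the SVC handler then asks the secure side for a per-task stack. As an illustration only (not part of the kernel sources in this patch), a minimal sketch of how an application task might request its secure context before its first call across the security boundary - the function name vSecureSideFunction and the 256-byte stack size are assumptions:

```c
#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical non-secure callable entry point exported by the secure image. */
extern void vSecureSideFunction( void );

static void prvSecureCallingTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* Ask the secure side for a 256 byte secure stack for this task. Expands
     * to vPortAllocateSecureContext( 256 ), which issues the SVC shown above. */
    portALLOCATE_SECURE_CONTEXT( 256 );

    for( ; ; )
    {
        /* Calls into the secure side now run on this task's own secure stack. */
        vSecureSideFunction();
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}
```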
*/ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h new file mode 100644 index 00000000000..830fa2c1379 --- /dev/null +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -0,0 +1,71 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
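The privilege helpers declared above are normally combined in a save/raise/restore pattern. The following is a sketch of that pattern, not the kernel's own wrapper code; it assumes configENABLE_MPU == 1 so that portIS_PRIVILEGED(), portRAISE_PRIVILEGE() and portRESET_PRIVILEGE() (defined further down in portmacrocommon.h) are available, and the function name is illustrative:

```c
#include "FreeRTOS.h"

static void prvAccessPrivilegedResource( void )
{
    /* Remember whether the caller was already privileged (CONTROL bit 0 clear). */
    BaseType_t xRunningPrivileged = portIS_PRIVILEGED();

    if( xRunningPrivileged == pdFALSE )
    {
        /* Raise privilege through the SVC - the handler only grants the request
         * when it originates from a system call location. */
        portRAISE_PRIVILEGE();
    }

    /* ... access privileged-only registers or kernel data here ... */

    if( xRunningPrivileged == pdFALSE )
    {
        /* Drop back to unprivileged by setting CONTROL bit 0 again. */
        portRESET_PRIVILEGE();
    }
}
```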
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M85" +#define portDONT_DISCARD __attribute__( ( used ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. 
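Because TickType_t and portTICK_PERIOD_MS above fix how time is expressed on this port, a small sketch of converting milliseconds to ticks may help; it assumes configTICK_RATE_HZ divides 1000 evenly and that the scheduler is running:

```c
#include "FreeRTOS.h"
#include "task.h"

/* Sketch: block for roughly half a second using the tick definitions above. */
static void prvDelayHalfSecond( void )
{
    const TickType_t xDelay = ( TickType_t ) 500 / portTICK_PERIOD_MS;

    vTaskDelay( xDelay );                 /* Explicit conversion. */
    vTaskDelay( pdMS_TO_TICKS( 500 ) );   /* Same conversion via the projdefs.h helper. */
}
```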
+ */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. 
+ */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. 
+ */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM85/secure/secure_context.c b/portable/GCC/ARM_CM85/secure/secure_context.c new file mode 100644 index 00000000000..b1d5503cb23 --- /dev/null +++ b/portable/GCC/ARM_CM85/secure/secure_context.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief CONTROL value for privileged tasks. + * + * Bit[0] - 0 --> Thread mode is privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_PRIVILEGED 0x02 + +/** + * @brief CONTROL value for un-privileged tasks. + * + * Bit[0] - 1 --> Thread mode is un-privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03 + +/** + * @brief Size of stack seal values in bytes. + */ +#define securecontextSTACK_SEAL_SIZE 8 + +/** + * @brief Stack seal value as recommended by ARM. + */ +#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5 + +/** + * @brief Maximum number of secure contexts. + */ +#ifndef secureconfigMAX_SECURE_CONTEXTS + #define secureconfigMAX_SECURE_CONTEXTS 8UL +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Pre-allocated array of secure contexts. + */ +SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ]; +/*-----------------------------------------------------------*/ + +/** + * @brief Get a free secure context for a task from the secure context pool (xSecureContexts). + * + * This function ensures that only one secure context is allocated for a task. + * + * @param[in] pvTaskHandle The task handle for which the secure context is allocated. + * + * @return Index of a free secure context in the xSecureContexts array. + */ +static uint32_t ulGetSecureContext( void * pvTaskHandle ); + +/** + * @brief Return the secure context to the secure context pool (xSecureContexts). 
+ * + * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array. + */ +static void vReturnSecureContext( uint32_t ulSecureContextIndex ); + +/* These are implemented in assembly. */ +extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ); +extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ); +/*-----------------------------------------------------------*/ + +static uint32_t ulGetSecureContext( void * pvTaskHandle ) +{ + /* Start with invalid index. */ + uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) && + ( xSecureContexts[ i ].pucStackLimit == NULL ) && + ( xSecureContexts[ i ].pucStackStart == NULL ) && + ( xSecureContexts[ i ].pvTaskHandle == NULL ) && + ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = i; + } + else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle ) + { + /* A task can only have one secure context. Do not allocate a second + * context for the same task. */ + ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + break; + } + } + + return ulSecureContextIndex; +} +/*-----------------------------------------------------------*/ + +static void vReturnSecureContext( uint32_t ulSecureContextIndex ) +{ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL; + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_Init( void ) +{ + uint32_t ulIPSR, i; + static uint32_t ulSecureContextsInitialized = 0; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) ) + { + /* Ensure to initialize secure contexts only once. */ + ulSecureContextsInitialized = 1; + + /* No stack for thread mode until a task's context is loaded. */ + secureportSET_PSPLIM( securecontextNO_STACK ); + secureportSET_PSP( securecontextNO_STACK ); + + /* Initialize all secure contexts. */ + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + xSecureContexts[ i ].pucCurrentStackPointer = NULL; + xSecureContexts[ i ].pucStackLimit = NULL; + xSecureContexts[ i ].pucStackStart = NULL; + xSecureContexts[ i ].pvTaskHandle = NULL; + } + + #if ( configENABLE_MPU == 1 ) + { + /* Configure thread mode to use PSP and to be unprivileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED ); + } + #else /* configENABLE_MPU */ + { + /* Configure thread mode to use PSP and to be privileged. 
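Because the pool above is statically sized, at most secureconfigMAX_SECURE_CONTEXTS tasks can hold a secure context at any one time, and each task can hold only one. If a project needs more than the default of eight, the limit can be raised when building the secure image; how the definition reaches secure_context.c (config header or compiler option) is project specific, and the value below is only an example:

```c
/* In the secure project's configuration, before secure_context.c is compiled. */
#define secureconfigMAX_SECURE_CONTEXTS    12UL
```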
*/
+            secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
+        }
+        #endif /* configENABLE_MPU */
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+                                                                                        uint32_t ulIsTaskPrivileged,
+                                                                                        void * pvTaskHandle )
+#else /* configENABLE_MPU */
+    secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+                                                                                        void * pvTaskHandle )
+#endif /* configENABLE_MPU */
+{
+    uint8_t * pucStackMemory = NULL;
+    uint8_t * pucStackLimit;
+    uint32_t ulIPSR, ulSecureContextIndex;
+    SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID;
+
+    #if ( configENABLE_MPU == 1 )
+        uint32_t * pulCurrentStackPointer = NULL;
+    #endif /* configENABLE_MPU */
+
+    /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit
+     * Register (PSPLIM) value. */
+    secureportREAD_IPSR( ulIPSR );
+    secureportREAD_PSPLIM( pucStackLimit );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode.
+     * Also do nothing if a secure context is already loaded. PSPLIM is set to
+     * securecontextNO_STACK when no secure context is loaded. */
+    if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) )
+    {
+        /* Obtain a free secure context. */
+        ulSecureContextIndex = ulGetSecureContext( pvTaskHandle );
+
+        /* Were we able to get a free context? */
+        if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS )
+        {
+            /* Allocate the stack space. */
+            pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE );
+
+            if( pucStackMemory != NULL )
+            {
+                /* Since stack grows down, the starting point will be the last
+                 * location. Note that this location is next to the last
+                 * allocated byte for stack (excluding the space for seal values)
+                 * because the hardware decrements the stack pointer before
+                 * writing i.e. if stack pointer is 0x2, a push operation will
+                 * decrement the stack pointer to 0x1 and then write at 0x1. */
+                xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize;
+
+                /* Seal the created secure process stack. */
+                *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE;
+                *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE;
+
+                /* The stack cannot go beyond this location. This value is
+                 * programmed in the PSPLIM register on context switch. */
+                xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory;
+
+                xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle;
+
+                #if ( configENABLE_MPU == 1 )
+                {
+                    /* Store the correct CONTROL value for the task on the stack.
+                     * This value is programmed in the CONTROL register on
+                     * context switch. */
+                    pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+                    pulCurrentStackPointer--;
+
+                    if( ulIsTaskPrivileged )
+                    {
+                        *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
+                    }
+                    else
+                    {
+                        *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
+                    }
+
+                    /* Store the current stack pointer. This value is programmed in
+                     * the PSP register on context switch.
*/
+                    xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
+                }
+                #else /* configENABLE_MPU */
+                {
+                    /* The current SP is set to the start of the stack. This
+                     * value is programmed in the PSP register on context switch. */
+                    xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+                }
+                #endif /* configENABLE_MPU */
+
+                /* Ensure to never return 0 as a valid context handle. */
+                xSecureContextHandle = ulSecureContextIndex + 1UL;
+            }
+        }
+    }
+
+    return xSecureContextHandle;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+    uint32_t ulIPSR, ulSecureContextIndex;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        /* Only free if a valid context handle is passed. */
+        if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+        {
+            ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+            /* Ensure that the secure context being deleted is associated with
+             * the task. */
+            if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle )
+            {
+                /* Free the stack space. */
+                vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit );
+
+                /* Return the secure context back to the free secure contexts pool. */
+                vReturnSecureContext( ulSecureContextIndex );
+            }
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+    uint8_t * pucStackLimit;
+    uint32_t ulSecureContextIndex;
+
+    if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+    {
+        ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+        secureportREAD_PSPLIM( pucStackLimit );
+
+        /* Ensure that no secure context is loaded and the task is loading its
+         * own context. */
+        if( ( pucStackLimit == securecontextNO_STACK ) &&
+            ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+        {
+            SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+    uint8_t * pucStackLimit;
+    uint32_t ulSecureContextIndex;
+
+    if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+    {
+        ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+        secureportREAD_PSPLIM( pucStackLimit );
+
+        /* Ensure that the task's context is loaded and the task is saving its own
+         * context.
*/ + if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/secure/secure_context.h b/portable/GCC/ARM_CM85/secure/secure_context.h new file mode 100644 index 00000000000..57e390c264c --- /dev/null +++ b/portable/GCC/ARM_CM85/secure/secure_context.h @@ -0,0 +1,135 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_CONTEXT_H__ +#define __SECURE_CONTEXT_H__ + +/* Standard includes. */ +#include + +/* FreeRTOS includes. */ +#include "FreeRTOSConfig.h" + +/** + * @brief PSP value when no secure context is loaded. + */ +#define securecontextNO_STACK 0x0 + +/** + * @brief Invalid context ID. + */ +#define securecontextINVALID_CONTEXT_ID 0UL +/*-----------------------------------------------------------*/ + +/** + * @brief Structure to represent a secure context. + * + * @note Since stack grows down, pucStackStart is the highest address while + * pucStackLimit is the first address of the allocated memory. + */ +typedef struct SecureContext +{ + uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */ + uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */ + uint8_t * pucStackStart; /**< First location of the stack memory. */ + void * pvTaskHandle; /**< Task handle of the task this context is associated with. */ +} SecureContext_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Opaque handle for a secure context. + */ +typedef uint32_t SecureContextHandle_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Initializes the secure context management system. + * + * PSP is set to NULL and therefore a task must allocate and load a context + * before calling any secure side function in the thread mode. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureContext_Init( void ); + +/** + * @brief Allocates a context on the secure side. 
+ * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] ulSecureStackSize Size of the stack to allocate on secure side. + * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise. + * + * @return Opaque context handle if context is successfully allocated, NULL + * otherwise. + */ +#if ( configENABLE_MPU == 1 ) + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ); +#else /* configENABLE_MPU */ + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ); +#endif /* configENABLE_MPU */ + +/** + * @brief Frees the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the + * context to be freed. + */ +void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Loads the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be loaded. + */ +void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Saves the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be saved. + */ +void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +#endif /* __SECURE_CONTEXT_H__ */ diff --git a/portable/GCC/ARM_CM85/secure/secure_context_port.c b/portable/GCC/ARM_CM85/secure/secure_context_port.c new file mode 100644 index 00000000000..ebf02077c7a --- /dev/null +++ b/portable/GCC/ARM_CM85/secure/secure_context_port.c @@ -0,0 +1,97 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure port macros. 
*/ +#include "secure_port_macros.h" + +void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) ); +void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) ); + +void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) +{ + /* pxSecureContext value is in r0. */ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r1, ipsr \n" /* r1 = IPSR. */ + " cbz r1, load_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ + " ldmia r0!, {r1, r2} \n" /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r1!, {r3} \n" /* Read CONTROL register value from task's stack. r3 = CONTROL. */ + " msr control, r3 \n" /* CONTROL = r3. */ + #endif /* configENABLE_MPU */ + " \n" + " msr psplim, r2 \n" /* PSPLIM = r2. */ + " msr psp, r1 \n" /* PSP = r1. */ + " \n" + " load_ctx_therad_mode: \n" + " bx lr \n" + " \n" + ::: "r0", "r1", "r2" + ); +} +/*-----------------------------------------------------------*/ + +void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) +{ + /* pxSecureContext value is in r0. */ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r1, ipsr \n" /* r1 = IPSR. */ + " cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ + " mrs r1, psp \n" /* r1 = PSP. */ + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */ + " vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + #if ( configENABLE_MPU == 1 ) + " mrs r2, control \n" /* r2 = CONTROL. */ + " stmdb r1!, {r2} \n" /* Store CONTROL value on the stack. */ + #endif /* configENABLE_MPU */ + " \n" + " str r1, [r0] \n" /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */ + " movs r1, %0 \n" /* r1 = securecontextNO_STACK. */ + " msr psplim, r1 \n" /* PSPLIM = securecontextNO_STACK. */ + " msr psp, r1 \n" /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */ + " \n" + " save_ctx_therad_mode: \n" + " bx lr \n" + " \n" + ::"i" ( securecontextNO_STACK ) : "r1", "memory" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/secure/secure_heap.c b/portable/GCC/ARM_CM85/secure/secure_heap.c new file mode 100644 index 00000000000..b3a7378188b --- /dev/null +++ b/portable/GCC/ARM_CM85/secure/secure_heap.c @@ -0,0 +1,451 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure context heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Total heap size. + */ +#ifndef secureconfigTOTAL_HEAP_SIZE + #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) ) +#endif + +/* No test marker by default. */ +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +/* No tracing by default. */ +#ifndef traceMALLOC + #define traceMALLOC( pvReturn, xWantedSize ) +#endif + +/* No tracing by default. */ +#ifndef traceFREE + #define traceFREE( pv, xBlockSize ) +#endif + +/* Block sizes must not get too small. */ +#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define secureheapBITS_PER_BYTE ( ( size_t ) 8 ) +/*-----------------------------------------------------------*/ + +/* Allocate the memory for the heap. */ +#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) + +/* The application writer has already defined the array used for the RTOS +* heap - probably so it can be placed in a special segment or address. */ + extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#else /* configAPPLICATION_ALLOCATED_HEAP */ + static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/** + * @brief The linked list structure. + * + * This is used to link free blocks in order of their memory address. + */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ +} BlockLink_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Called automatically to setup the required heap structures the first + * time pvPortMalloc() is called. + */ +static void prvHeapInit( void ); + +/** + * @brief Inserts a block of memory that is being freed into the correct + * position in the list of free memory blocks. + * + * The block being freed will be merged with the block in front it and/or the + * block behind it if the memory blocks are adjacent to each other. + * + * @param[in] pxBlockToInsert The block being freed. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ); +/*-----------------------------------------------------------*/ + +/** + * @brief The size of the structure placed at the beginning of each allocated + * memory block must by correctly byte aligned. + */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + +/** + * @brief Create a couple of list links to mark the start and end of the list. + */ +static BlockLink_t xStart, * pxEnd = NULL; + +/** + * @brief Keeps track of the number of free bytes remaining, but says nothing + * about fragmentation. 
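The rounding applied to xHeapStructSize above, and later to each allocation request in pvPortMalloc(), is plain mask arithmetic. A standalone sketch with assumed values (8-byte alignment, an 8-byte BlockLink_t on a 32-bit target) shows the effect:

```c
#include <stddef.h>
#include <stdio.h>

int main( void )
{
    const size_t xAlignment = 8;      /* Assumed secureportBYTE_ALIGNMENT. */
    const size_t xMask = 0x7;         /* Assumed secureportBYTE_ALIGNMENT_MASK. */
    const size_t xRawStructSize = 8;  /* Assumed sizeof( BlockLink_t ): pointer + size_t. */
    size_t xHeapStructSize, xWantedSize;

    /* Round the header size up to the next multiple of the alignment. */
    xHeapStructSize = ( xRawStructSize + ( xAlignment - 1 ) ) & ~xMask;
    printf( "aligned header size = %zu\n", xHeapStructSize );   /* Prints 8. */

    /* A 30 byte request grows by the header, then gets padded to 40 bytes. */
    xWantedSize = 30 + xHeapStructSize;

    if( ( xWantedSize & xMask ) != 0 )
    {
        xWantedSize += ( xAlignment - ( xWantedSize & xMask ) );
    }

    printf( "allocated block size = %zu\n", xWantedSize );      /* Prints 40. */

    return 0;
}
```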
+ */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/** + * @brief Gets set to the top bit of an size_t type. + * + * When this bit in the xBlockSize member of an BlockLink_t structure is set + * then the block belongs to the application. When the bit is free the block is + * still part of the free heap space. + */ +static size_t xBlockAllocatedBit = 0; +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ + BlockLink_t * pxFirstFreeBlock; + uint8_t * pucAlignedHeap; + size_t uxAddress; + size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( secureportBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + * blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + * at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + * entire heap space, minus the space taken by pxEnd. */ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) +{ + BlockLink_t * pxIterator; + uint8_t * puc; + + /* Iterate through the list until a block is found that has a higher address + * than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. 
*/ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + * before and the block after, then it's pxNextFreeBlock pointer will have + * already been set, and should not be set here as that would make it point + * to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +void * pvPortMalloc( size_t xWantedSize ) +{ + BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + void * pvReturn = NULL; + + /* If this is the first call to malloc then the heap will require + * initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is set. + * The top bit of the block size member of the BlockLink_t structure is used + * to determine who owns the block - the application or the kernel, so it + * must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + * structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number of + * bytes. */ + if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) ); + secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + * one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size was + * not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + * BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + * of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + * two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + * block following the number of bytes requested. The void + * cast is used to prevent byte alignment warnings from the + * compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the single + * block. 
*/ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned by + * the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + + #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */ + + secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void * pv ) +{ + uint8_t * puc = ( uint8_t * ) pv; + BlockLink_t * pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + * before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + secureportASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + * allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + secureportDISABLE_NON_SECURE_INTERRUPTS(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + secureportENABLE_NON_SECURE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/secure/secure_heap.h b/portable/GCC/ARM_CM85/secure/secure_heap.h new file mode 100644 index 00000000000..bd42ff9ba69 --- /dev/null +++ b/portable/GCC/ARM_CM85/secure/secure_heap.h @@ -0,0 +1,66 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
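Taken together, the allocator above gives the secure side its own pvPortMalloc()/vPortFree() pair plus two watermark queries. A usage sketch from secure-side code follows; the buffer size and the failure handling are illustrative only:

```c
#include <stdint.h>
#include "secure_heap.h"

static uint8_t * prvAllocateScratchBuffer( void )
{
    uint8_t * pucBuffer;

    /* 128 bytes from the secure heap; returns NULL when no free block fits. */
    pucBuffer = pvPortMalloc( 128 );

    if( pucBuffer == NULL )
    {
        /* xPortGetFreeHeapSize() only reports the total free bytes - the heap
         * may still be too fragmented for a single 128 byte block. */
        ( void ) xPortGetFreeHeapSize();
    }

    return pucBuffer;
}

static void prvReleaseScratchBuffer( uint8_t * pucBuffer )
{
    /* Freeing merges the block back into the free list where possible. */
    vPortFree( pucBuffer );
}
```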
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_HEAP_H__
+#define __SECURE_HEAP_H__
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/**
+ * @brief Allocates memory from heap.
+ *
+ * @param[in] xWantedSize The size of the memory to be allocated.
+ *
+ * @return Pointer to the memory region if the allocation is successful, NULL
+ * otherwise.
+ */
+void * pvPortMalloc( size_t xWantedSize );
+
+/**
+ * @brief Frees the previously allocated memory.
+ *
+ * @param[in] pv Pointer to the memory to be freed.
+ */
+void vPortFree( void * pv );
+
+/**
+ * @brief Get the free heap size.
+ *
+ * @return Free heap size.
+ */
+size_t xPortGetFreeHeapSize( void );
+
+/**
+ * @brief Get the minimum ever free heap size.
+ *
+ * @return Minimum ever free heap size.
+ */
+size_t xPortGetMinimumEverFreeHeapSize( void );
+
+#endif /* __SECURE_HEAP_H__ */
diff --git a/portable/GCC/ARM_CM85/secure/secure_init.c b/portable/GCC/ARM_CM85/secure/secure_init.c
new file mode 100644
index 00000000000..d91695bc0eb
--- /dev/null
+++ b/portable/GCC/ARM_CM85/secure/secure_init.c
@@ -0,0 +1,106 @@
+/*
+ * FreeRTOS Kernel
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure init includes. */
+#include "secure_init.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define secureinitSCB_AIRCR                 ( ( volatile uint32_t * ) 0xe000ed0c )  /* Application Interrupt and Reset Control Register. */
+#define secureinitSCB_AIRCR_VECTKEY_POS     ( 16UL )
+#define secureinitSCB_AIRCR_VECTKEY_MASK    ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
+#define secureinitSCB_AIRCR_PRIS_POS        ( 14UL )
+#define secureinitSCB_AIRCR_PRIS_MASK       ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define secureinitFPCCR                     ( ( volatile uint32_t * ) 0xe000ef34 )  /* Floating Point Context Control Register. */
+#define secureinitFPCCR_LSPENS_POS          ( 29UL )
+#define secureinitFPCCR_LSPENS_MASK         ( 1UL << secureinitFPCCR_LSPENS_POS )
+#define secureinitFPCCR_TS_POS              ( 26UL )
+#define secureinitFPCCR_TS_MASK             ( 1UL << secureinitFPCCR_TS_POS )
+
+#define secureinitNSACR                     ( ( volatile uint32_t * ) 0xe000ed8c )  /* Non-secure Access Control Register. */
+#define secureinitNSACR_CP10_POS            ( 10UL )
+#define secureinitNSACR_CP10_MASK           ( 1UL << secureinitNSACR_CP10_POS )
+#define secureinitNSACR_CP11_POS            ( 11UL )
+#define secureinitNSACR_CP11_MASK           ( 1UL << secureinitNSACR_CP11_POS )
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
+{
+    uint32_t ulIPSR;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
+                                   ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
+                                   ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
+    }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
+{
+    uint32_t ulIPSR;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
+         * permitted. CP11 should be programmed to the same value as CP10. */
+        *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
+
+        /* LSPENS = 0 ==> LSPEN is writable from the non-secure state. This
+         * ensures that we can enable/disable lazy stacking in port.c. */
+        *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK );
+
+        /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
+         * registers (S16-S31) are also pushed to stack on exception entry and
+         * restored on exception return.
*/ + *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK ); + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/secure/secure_init.h b/portable/GCC/ARM_CM85/secure/secure_init.h new file mode 100644 index 00000000000..e18ba44dafc --- /dev/null +++ b/portable/GCC/ARM_CM85/secure/secure_init.h @@ -0,0 +1,54 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_INIT_H__ +#define __SECURE_INIT_H__ + +/** + * @brief De-prioritizes the non-secure exceptions. + * + * This is needed to ensure that the non-secure PendSV runs at the lowest + * priority. Context switch is done in the non-secure PendSV handler. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_DePrioritizeNSExceptions( void ); + +/** + * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access. + * + * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point + * Registers are not leaked to the non-secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_EnableNSFPUAccess( void ); + +#endif /* __SECURE_INIT_H__ */ diff --git a/portable/GCC/ARM_CM85/secure/secure_port_macros.h b/portable/GCC/ARM_CM85/secure/secure_port_macros.h new file mode 100644 index 00000000000..54990549a10 --- /dev/null +++ b/portable/GCC/ARM_CM85/secure/secure_port_macros.h @@ -0,0 +1,140 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_PORT_MACROS_H__ +#define __SECURE_PORT_MACROS_H__ + +/** + * @brief Byte alignment requirements. + */ +#define secureportBYTE_ALIGNMENT 8 +#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 ) + +/** + * @brief Macro to declare a function as non-secure callable. + */ +#if defined( __IAR_SYSTEMS_ICC__ ) + #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root +#else + #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) ) +#endif + +/** + * @brief Set the secure PRIMASK value. + */ +#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Set the non-secure PRIMASK value. + */ +#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Read the PSP value in the given variable. + */ +#define secureportREAD_PSP( pucOutCurrentStackPointer ) \ + __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) ) + +/** + * @brief Set the PSP to the given value. + */ +#define secureportSET_PSP( pucCurrentStackPointer ) \ + __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) ) + +/** + * @brief Read the PSPLIM value in the given variable. + */ +#define secureportREAD_PSPLIM( pucOutStackLimit ) \ + __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) ) + +/** + * @brief Set the PSPLIM to the given value. + */ +#define secureportSET_PSPLIM( pucStackLimit ) \ + __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) ) + +/** + * @brief Set the NonSecure MSP to the given value. + */ +#define secureportSET_MSP_NS( pucMainStackPointer ) \ + __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) ) + +/** + * @brief Set the CONTROL register to the given value. + */ +#define secureportSET_CONTROL( ulControl ) \ + __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" ) + +/** + * @brief Read the Interrupt Program Status Register (IPSR) value in the given + * variable. + */ +#define secureportREAD_IPSR( ulIPSR ) \ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) ) + +/** + * @brief PRIMASK value to enable interrupts. + */ +#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0 + +/** + * @brief PRIMASK value to disable interrupts. + */ +#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1 + +/** + * @brief Disable secure interrupts. + */ +#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Disable non-secure interrupts. + * + * This effectively disables context switches. + */ +#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Enable non-secure interrupts. 
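 *
 * A typical usage pattern, mirroring the one in vPortFree() in secure_heap.c,
 * brackets a short secure-side critical section with the disable/enable pair,
 * e.g.
 *
 *     secureportDISABLE_NON_SECURE_INTERRUPTS();
 *     {
 *         prvInsertBlockIntoFreeList( pxLink );
 *     }
 *     secureportENABLE_NON_SECURE_INTERRUPTS();
 *
 * so that a non-secure interrupt cannot cause a context switch while the
 * secure-side data structure is being updated.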
+ */ +#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL ) + +/** + * @brief Assert definition. + */ +#define secureportASSERT( x ) \ + if( ( x ) == 0 ) \ + { \ + secureportDISABLE_SECURE_INTERRUPTS(); \ + secureportDISABLE_NON_SECURE_INTERRUPTS(); \ + for( ; ; ) {; } \ + } + +#endif /* __SECURE_PORT_MACROS_H__ */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c new file mode 100644 index 00000000000..d746923dfee --- /dev/null +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c @@ -0,0 +1,1203 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. 
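
/* For reference, the three supported combinations described above map onto
 * FreeRTOSConfig.h settings as follows (an illustrative snippet only - these
 * values live in the application's FreeRTOSConfig.h, not in this file):
 *
 *     // 1. FreeRTOS runs entirely on the secure side:
 *     #define configRUN_FREERTOS_SECURE_ONLY    1
 *     #define configENABLE_TRUSTZONE            0
 *
 *     // 2. FreeRTOS runs on the non-secure side with secure function call support:
 *     #define configRUN_FREERTOS_SECURE_ONLY    0
 *     #define configENABLE_TRUSTZONE            1
 *
 *     // 3. FreeRTOS runs on the non-secure side only:
 *     #define configRUN_FREERTOS_SECURE_ONLY    0
 *     #define configENABLE_TRUSTZONE            0
 */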
+#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + +/* The way the SysTick is clocked is not modified in case it is not the + * same a the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. 
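 *
 * As a worked example with illustrative addresses: a region spanning
 * 0x20000000 to 0x20007FFF would be programmed with
 *
 *     RBAR address bits = 0x20000000 & portMPU_RBAR_ADDRESS_MASK = 0x20000000
 *     RLAR address bits = 0x20007FFF & portMPU_RLAR_ADDRESS_MASK = 0x20007FE0
 *
 * i.e. RBAR holds the 32-byte aligned start address of the region and RLAR
 * holds its 32-byte aligned limit (last) address, which is how prvSetupMPU()
 * and vPortStoreTaskMPUSettings() below program the regions.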
*/ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. 
+ * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. 
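 *
 * As a rough worked example (assuming a 100 MHz SysTick clock and a 1 kHz
 * tick rate, neither of which is mandated by this port), the calculation in
 * vPortSetupTimerInterrupt() gives
 *
 *     ulTimerCountsForOneTick         = 100000000 / 1000  = 100000
 *     xMaximumPossibleSuppressedTicks = 0xFFFFFF / 100000 = 167
 *
 * so a single tickless sleep can cover at most about 167 tick periods before
 * the 24-bit counter would wrap.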
+ */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for is + * accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code will execute part way + * through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be un-suspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + * this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + * periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation + * contains its own wait for interrupt or wait for event + * instruction, and so wfi should not be executed again. However, + * the original expected idle time variable must remain unmodified, + * so a copy is taken. 
*/ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will + * increase any slippage between the time maintained by the RTOS and + * calendar time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. + * Again, the time the SysTick is stopped for is accounted for as + * best it can be, but using the tickless mode will inevitably + * result in some tiny drift of the time maintained by the kernel + * with respect to calendar time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + * been set back to the current reload value (the reload back being + * correct for the entire expected idle time) or if the SysTick is + * yet to count to zero (in which case an interrupt other than the + * SysTick must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + * reloaded with ulReloadValue. Reset the + * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + * period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is + * stepped forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + * Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. 
*/ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + * value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. 
*/ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. */ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. 
*/ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. 
*/ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
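         *
         * The xRegions array itself comes from the application, typically via
         * xTaskCreateRestricted(). A minimal sketch of such a definition (the
         * buffer name and sizes here are illustrative only):
         *
         *     static uint8_t ucSharedBuffer[ 512 ] __attribute__( ( aligned( 32 ) ) );
         *
         *     const MemoryRegion_t xAppRegions[ portNUM_CONFIGURABLE_REGIONS ] =
         *     {
         *         { ucSharedBuffer, 512, tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER },
         *         { NULL,           0,   0 },
         *         { NULL,           0,   0 }
         *     };
         *
         * Each pvBaseAddress/ulLengthInBytes pair is translated into the
         * RBAR/RLAR values below, and zero-length entries take the
         * "invalidate the region" branch at the end of the loop.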
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c new file mode 100644 index 00000000000..21b515e0f6a --- /dev/null +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c @@ -0,0 +1,365 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Portasm includes. */ +#include "portasm.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. 
*/ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r3, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #4 \n"/* r3 = 4. */ + " str r3, [r2] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #8 \n"/* r3 = 8. */ + " str r3, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #12 \n"/* r3 = 12. */ + " str r3, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " msr control, r2 \n"/* Set this task's CONTROL value. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n"/* Finally, branch to EXC_RETURN. */ + #else /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. 
*/ + #endif /* configENABLE_MPU */ + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst2: .word 0xe000ed94 \n" + "xMAIR0Const2: .word 0xe000edc0 \n" + "xRNRConst2: .word 0xe000ed98 \n" + "xRBARConst2: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + " ite ne \n" + " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + " bx lr \n"/* Return. */ + " \n" + " .align 4 \n" + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* Read the CONTROL register. */ + " bic r0, #1 \n"/* Clear the bit 0. */ + " msr control, r0 \n"/* Write back the new CONTROL value. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vResetPrivilege( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " orr r0, #1 \n"/* r0 = r0 | 1. */ + " msr control, r0 \n"/* CONTROL = r0. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ + " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ + " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */ + " cpsie i \n"/* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc %0 \n"/* System call to start the first task. */ + " nop \n" + " \n" + " .align 4 \n" + "xVTORConst: .word 0xe000ed08 \n" + ::"i" ( portSVC_START_SCHEDULER ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ + " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " msr basepri, r0 \n"/* basepri = ulMask. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. 
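A usage sketch of the pair above: ulSetInterruptMask() returns the previous BASEPRI value after raising it to configMAX_SYSCALL_INTERRUPT_PRIORITY, and vClearInterruptMask() restores it. This is the save/restore pattern behind the portSET_INTERRUPT_MASK_FROM_ISR()/portCLEAR_INTERRUPT_MASK_FROM_ISR() macros of this port.

void vUpdateSharedStateFromISR( volatile uint32_t * pulShared )
{
    uint32_t ulPreviousMask;

    ulPreviousMask = ulSetInterruptMask();
    {
        ( *pulShared )++;   /* Safe against tasks and other masked interrupts. */
    }
    vClearInterruptMask( ulPreviousMask );
}
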
*/ + ::: "memory" + ); +} +/*-----------------------------------------------------------*/ + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, psp \n"/* Read PSP in r0. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r2, control \n"/* r2 = CONTROL. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ + #else /* configENABLE_MPU */ + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ + #endif /* configENABLE_MPU */ + " \n" + " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " str r0, [r1] \n"/* Save the new top of stack in TCB. */ + " \n" + " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n"/* r0 = 0. */ + " msr basepri, r0 \n"/* Enable interrupts. */ + " \n" + " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ + " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r3, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #4 \n"/* r3 = 4. */ + " str r3, [r2] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #8 \n"/* r3 = 8. */ + " str r3, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #12 \n"/* r3 = 12. */ + " str r3, [r2] \n"/* Program RNR = 12. 
*/ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ + #else /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + #if ( configENABLE_MPU == 1 ) + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ + #else /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ + #endif /* configENABLE_MPU */ + " msr psp, r0 \n"/* Remember the new top of stack for the task. */ + " bx r3 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst: .word 0xe000ed94 \n" + "xMAIR0Const: .word 0xe000edc0 \n" + "xRNRConst: .word 0xe000ed98 \n" + "xRBARConst: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} +/*-----------------------------------------------------------*/ + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " tst lr, #4 \n" + " ite eq \n" + " mrseq r0, msp \n" + " mrsne r0, psp \n" + " ldr r1, svchandler_address_const \n" + " bx r1 \n" + " \n" + " .align 4 \n" + "svchandler_address_const: .word vPortSVCHandler_C \n" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.h new file mode 100644 index 00000000000..b37f6d16a56 --- /dev/null +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h new file mode 100644 index 00000000000..830fa2c1379 --- /dev/null +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -0,0 +1,71 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M85" +#define portDONT_DISCARD __attribute__( ( used ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. 
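A FreeRTOSConfig.h sketch of the settings this header (and the Cortex-M85 portmacro.h) requires to be defined - example values for a non-TrustZone, non-MPU build, not a recommendation.

#define configENABLE_FPU          1
#define configENABLE_MPU          0
#define configENABLE_TRUSTZONE    0   /* The _NTZ variant runs with TrustZone support disabled. */
#define configENABLE_MVE          1   /* Checked by the Cortex-M85 portmacro.h. */
#define configUSE_16_BIT_TICKS    0   /* TickType_t becomes uint32_t, so tick reads are atomic. */
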
+ */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. 
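A usage sketch of the scheduler utilities defined above from a hypothetical peripheral interrupt handler: the FromISR give may unblock a higher priority task, in which case portYIELD_FROM_ISR() pends PendSV so the context switch happens on exception return. xRxSemaphore and the handler name are placeholders.

#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xRxSemaphore;   /* placeholder, created by the application */

void UART0_RX_IRQHandler( void )         /* hypothetical vector name */
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    ( void ) xSemaphoreGiveFromISR( xRxSemaphore, &xHigherPriorityTaskWoken );
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
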
If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c new file mode 100644 index 00000000000..d746923dfee --- /dev/null +++ b/portable/IAR/ARM_CM85/non_secure/port.c @@ -0,0 +1,1203 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. 
Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + +/* The way the SysTick is clocked is not modified in case it is not the + * same a the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. 
+ */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. 
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. 
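A sketch of the override the comment above allows: because the default vPortSetupTimerInterrupt() below is provided as a weak symbol, an application-supplied definition such as this one replaces the SysTick based tick source. vConfigureAlternativeTickTimer() is a hypothetical vendor driver call, not a real API.

extern void vConfigureAlternativeTickTimer( uint32_t ulTickRateHz );   /* hypothetical vendor driver */

void vPortSetupTimerInterrupt( void )
{
    /* Configure an alternative tick source at configTICK_RATE_HZ and enable
     * its interrupt in place of the default SysTick setup. */
    vConfigureAlternativeTickTimer( configTICK_RATE_HZ );
}
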
+ */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for is + * accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code will execute part way + * through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be un-suspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + * this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + * periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + * above. 
*/ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation + * contains its own wait for interrupt or wait for event + * instruction, and so wfi should not be executed again. However, + * the original expected idle time variable must remain unmodified, + * so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will + * increase any slippage between the time maintained by the RTOS and + * calendar time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. + * Again, the time the SysTick is stopped for is accounted for as + * best it can be, but using the tickless mode will inevitably + * result in some tiny drift of the time maintained by the kernel + * with respect to calendar time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + * been set back to the current reload value (the reload back being + * correct for the entire expected idle time) or if the SysTick is + * yet to count to zero (in which case an interrupt other than the + * SysTick must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + * reloaded with ulReloadValue. Reset the + * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + * period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is + * stepped forward by one less than the time spent waiting. 
*/ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + * Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + * value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. 
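A short sketch of the rule prvTaskExitError() enforces: a task function must never return, so a task that has finished its work deletes itself instead.

static void prvOneShotTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* ... perform the one-off work here ... */

    /* Returning would end up in prvTaskExitError(); delete the task instead. */
    vTaskDelete( NULL );
}
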
*/ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. 
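+             * As with the flash regions above, RBAR takes the 32-byte aligned
+             * base address and RLAR the inclusive limit address. Purely as an
+             * illustration (addresses invented): a privileged SRAM block from
+             * 0x20000000 to 0x20001FFF would give an RBAR address field of
+             * 0x20000000 and an RLAR address field of 0x20001FE0, which the
+             * MPU interprets as a limit of 0x20001FFF.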
*/ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
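+             * In that build the application provides the addresses itself, for
+             * example (symbol values invented for illustration):
+             *
+             *     uint32_t * __syscalls_flash_start__ = ( uint32_t * ) 0x00010000;
+             *     uint32_t * __syscalls_flash_end__   = ( uint32_t * ) 0x00011FFF;
+             *
+             * and those values must match the memory actually reserved for the
+             * FreeRTOS system call section.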
*/ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
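+             * For reference, ucSVCNumber was decoded above from the SVC
+             * instruction itself: an "svc 3" assembles to the halfword 0xDF03
+             * and, because the stacked return PC already points past the 2-byte
+             * instruction, ( ( uint8_t * ) ulPC )[ -2 ] reads back the
+             * immediate 3 (instruction value shown for illustration only).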
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
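+                                                                   * With the definitions normally used by this port the
+                                                                   * privileged value only sets CONTROL.SPSEL (use the PSP)
+                                                                   * while the unprivileged value also sets CONTROL.nPRIV;
+                                                                   * the exact constants come from this port's headers and
+                                                                   * are written to CONTROL when the task's context is
+                                                                   * restored.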
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
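+         * (For illustration: the create-time path is taken by the MPU-aware
+         * creation APIs such as xTaskCreateRestricted(), whose TaskParameters_t
+         * carries the xRegions array handled below, while a later call to
+         * vTaskAllocateMPURegions() passes a zero ulStackDepth so only the
+         * configurable regions are re-programmed. The API names are the
+         * standard FreeRTOS MPU interfaces rather than anything defined in
+         * this file.)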
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
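+                 * An entry whose ulLengthInBytes is zero - for example an unused
+                 * slot left as { 0, 0, 0 } in the application's MemoryRegion_t
+                 * array - also takes this path, so unused configurable regions
+                 * end up disabled rather than keeping stale settings.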
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85/non_secure/portasm.h b/portable/IAR/ARM_CM85/non_secure/portasm.h new file mode 100644 index 00000000000..b37f6d16a56 --- /dev/null +++ b/portable/IAR/ARM_CM85/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. 
+ * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/IAR/ARM_CM85/non_secure/portasm.s b/portable/IAR/ARM_CM85/non_secure/portasm.s new file mode 100644 index 00000000000..44cd8d0e19d --- /dev/null +++ b/portable/IAR/ARM_CM85/non_secure/portasm.s @@ -0,0 +1,353 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. 
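+
+For example, a fragment such as the following (the declaration is invented for
+illustration) stays visible to C translation units but is hidden from this
+assembly file:
+
+    #ifdef __ICCARM__
+        extern void vAnApplicationFunction( void );
+    #endif
+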
*/ +#include "FreeRTOSConfig.h" + + EXTERN pxCurrentTCB + EXTERN xSecureContext + EXTERN vTaskSwitchContext + EXTERN vPortSVCHandler_C + EXTERN SecureContext_SaveContext + EXTERN SecureContext_LoadContext + + PUBLIC xIsPrivileged + PUBLIC vResetPrivilege + PUBLIC vPortAllocateSecureContext + PUBLIC vRestoreContextOfFirstTask + PUBLIC vRaisePrivilege + PUBLIC vStartFirstTask + PUBLIC ulSetInterruptMask + PUBLIC vClearInterruptMask + PUBLIC PendSV_Handler + PUBLIC SVC_Handler + PUBLIC vPortFreeSecureContext +/*-----------------------------------------------------------*/ + +/*---------------- Unprivileged Functions -------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION .text:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +xIsPrivileged: + mrs r0, control /* r0 = CONTROL. */ + tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + ite ne + movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vResetPrivilege: + mrs r0, control /* r0 = CONTROL. */ + orr r0, r0, #1 /* r0 = r0 | 1. */ + msr control, r0 /* CONTROL = r0. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vPortAllocateSecureContext: + svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +/*----------------- Privileged Functions --------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION privileged_functions:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +vRestoreContextOfFirstTask: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r3, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ + ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r4, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r4, #4 /* r4 = 4. */ + str r4, [r2] /* Program RNR = 4. */ + adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. 
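+                                                            * Together with the dmb issued before the MPU was
+                                                            * disabled, this brackets the region re-programming
+                                                            * so the new attributes are in effect before the
+                                                            * first task's code executes.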
*/ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ + ldr r5, =xSecureContext + str r1, [r5] /* Set xSecureContext to this task's value for the same. */ + msr psplim, r2 /* Set this task's PSPLIM value. */ + msr control, r3 /* Set this task's CONTROL value. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r4 /* Finally, branch to EXC_RETURN. */ +#else /* configENABLE_MPU */ + ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + ldr r4, =xSecureContext + str r1, [r4] /* Set xSecureContext to this task's value for the same. */ + msr psplim, r2 /* Set this task's PSPLIM value. */ + movs r1, #2 /* r1 = 2. */ + msr CONTROL, r1 /* Switch to use PSP in the thread mode. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r3 /* Finally, branch to EXC_RETURN. */ +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +vRaisePrivilege: + mrs r0, control /* Read the CONTROL register. */ + bic r0, r0, #1 /* Clear the bit 0. */ + msr control, r0 /* Write back the new CONTROL value. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vStartFirstTask: + ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */ + ldr r0, [r0] /* The first entry in vector table is stack pointer. */ + msr msp, r0 /* Set the MSP back to the start of the stack. */ + cpsie i /* Globally enable interrupts. */ + cpsie f + dsb + isb + svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */ +/*-----------------------------------------------------------*/ + +ulSetInterruptMask: + mrs r0, basepri /* r0 = basepri. Return original basepri value. */ + mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vClearInterruptMask: + msr basepri, r0 /* basepri = ulMask. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + mrs r2, psp /* Read PSP in r2. */ + + cbz r0, save_ns_context /* No secure context to save. */ + push {r0-r2, r14} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r3} /* LR is now in r3. */ + mov lr, r3 /* LR = r3. */ + lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
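+                                                            * Shifting left by 25 moves Bit[6] into Bit[31], the
+                                                            * sign position, and lsls updates the flags, so the
+                                                            * bpl that follows is taken exactly when Bit[6] was 0.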
*/ + bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ +#if ( configENABLE_MPU == 1 ) + subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r3, control /* r3 = CONTROL. */ + mov r4, lr /* r4 = LR/EXC_RETURN. */ + stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ +#else /* configENABLE_MPU */ + subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ +#endif /* configENABLE_MPU */ + b select_next_task + + save_ns_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + adds r2, r2, #16 /* r2 = r2 + 16. */ + stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r3, control /* r3 = CONTROL. */ + mov r4, lr /* r4 = LR/EXC_RETURN. */ + subs r2, r2, #16 /* r2 = r2 - 16. */ + stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + adds r2, r2, #12 /* r2 = r2 + 12. */ + stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + subs r2, r2, #12 /* r2 = r2 - 12. */ + stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ + + #if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r3] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r3] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r4, [r1] /* r4 = *r1 i.e. 
r4 = MAIR0. */ + ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ + str r4, [r3] /* Program MAIR0. */ + ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ + movs r4, #4 /* r4 = 4. */ + str r4, [r3] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r3] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r3] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + + #if ( configENABLE_MPU == 1 ) + ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + msr control, r3 /* Restore the CONTROL register value for the task. */ + mov lr, r4 /* LR = r4. */ + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r3] /* Restore the task's xSecureContext. */ + cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + push {r2, r4} + bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r2, r4} + mov lr, r4 /* LR = r4. */ + lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr + #else /* configENABLE_MPU */ + ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + mov lr, r4 /* LR = r4. */ + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r3] /* Restore the task's xSecureContext. */ + cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + push {r2, r4} + bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r2, r4} + mov lr, r4 /* LR = r4. */ + lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr + #endif /* configENABLE_MPU */ + + restore_ns_context: + ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. 
Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr +/*-----------------------------------------------------------*/ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C +/*-----------------------------------------------------------*/ + +vPortFreeSecureContext: + /* r0 = uint32_t *pulTCB. */ + ldr r2, [r0] /* The first item in the TCB is the top of the stack. */ + ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */ + cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */ + it ne + svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM85/non_secure/portmacro.h b/portable/IAR/ARM_CM85/non_secure/portmacro.h new file mode 100644 index 00000000000..cfaae813eac --- /dev/null +++ b/portable/IAR/ARM_CM85/non_secure/portmacro.h @@ -0,0 +1,83 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. 
+ */ +#define portARCH_NAME "Cortex-M85" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. 
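+
+        /* A FreeRTOSConfig.h for this port therefore has to state each of these
+         * options explicitly. One illustrative combination (not taken from any
+         * particular demo) is:
+         *
+         *     #define configENABLE_FPU         1
+         *     #define configENABLE_MPU         0
+         *     #define configENABLE_TRUSTZONE   0
+         *
+         * together with configENABLE_MVE, which portmacro.h checks separately. */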
+ #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
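+ *
+ * For example (the task name and body are invented for illustration),
+ *
+ *     portTASK_FUNCTION( vAnExampleTask, pvParameters )
+ *     {
+ *         for( ;; )
+ *         {
+ *         }
+ *     }
+ *
+ * is simply another way of writing
+ * void vAnExampleTask( void * pvParameters ) { for( ;; ) { } }.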
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM85/secure/secure_context.c b/portable/IAR/ARM_CM85/secure/secure_context.c new file mode 100644 index 00000000000..b1d5503cb23 --- /dev/null +++ b/portable/IAR/ARM_CM85/secure/secure_context.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief CONTROL value for privileged tasks. + * + * Bit[0] - 0 --> Thread mode is privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_PRIVILEGED 0x02 + +/** + * @brief CONTROL value for un-privileged tasks. + * + * Bit[0] - 1 --> Thread mode is un-privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03 + +/** + * @brief Size of stack seal values in bytes. + */ +#define securecontextSTACK_SEAL_SIZE 8 + +/** + * @brief Stack seal value as recommended by ARM. + */ +#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5 + +/** + * @brief Maximum number of secure contexts. + */ +#ifndef secureconfigMAX_SECURE_CONTEXTS + #define secureconfigMAX_SECURE_CONTEXTS 8UL +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Pre-allocated array of secure contexts. + */ +SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ]; +/*-----------------------------------------------------------*/ + +/** + * @brief Get a free secure context for a task from the secure context pool (xSecureContexts). + * + * This function ensures that only one secure context is allocated for a task. + * + * @param[in] pvTaskHandle The task handle for which the secure context is allocated. + * + * @return Index of a free secure context in the xSecureContexts array. + */ +static uint32_t ulGetSecureContext( void * pvTaskHandle ); + +/** + * @brief Return the secure context to the secure context pool (xSecureContexts). + * + * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array. + */ +static void vReturnSecureContext( uint32_t ulSecureContextIndex ); + +/* These are implemented in assembly. */ +extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ); +extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ); +/*-----------------------------------------------------------*/ + +static uint32_t ulGetSecureContext( void * pvTaskHandle ) +{ + /* Start with invalid index. */ + uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) && + ( xSecureContexts[ i ].pucStackLimit == NULL ) && + ( xSecureContexts[ i ].pucStackStart == NULL ) && + ( xSecureContexts[ i ].pvTaskHandle == NULL ) && + ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = i; + } + else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle ) + { + /* A task can only have one secure context. Do not allocate a second + * context for the same task. 
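+             * Returning secureconfigMAX_SECURE_CONTEXTS (the invalid index)
+             * here makes the caller treat the request as a failure instead of
+             * silently allocating a duplicate context for the task.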
*/ + ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + break; + } + } + + return ulSecureContextIndex; +} +/*-----------------------------------------------------------*/ + +static void vReturnSecureContext( uint32_t ulSecureContextIndex ) +{ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL; + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_Init( void ) +{ + uint32_t ulIPSR, i; + static uint32_t ulSecureContextsInitialized = 0; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) ) + { + /* Ensure to initialize secure contexts only once. */ + ulSecureContextsInitialized = 1; + + /* No stack for thread mode until a task's context is loaded. */ + secureportSET_PSPLIM( securecontextNO_STACK ); + secureportSET_PSP( securecontextNO_STACK ); + + /* Initialize all secure contexts. */ + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + xSecureContexts[ i ].pucCurrentStackPointer = NULL; + xSecureContexts[ i ].pucStackLimit = NULL; + xSecureContexts[ i ].pucStackStart = NULL; + xSecureContexts[ i ].pvTaskHandle = NULL; + } + + #if ( configENABLE_MPU == 1 ) + { + /* Configure thread mode to use PSP and to be unprivileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED ); + } + #else /* configENABLE_MPU */ + { + /* Configure thread mode to use PSP and to be privileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED ); + } + #endif /* configENABLE_MPU */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ) +#else /* configENABLE_MPU */ + secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ) +#endif /* configENABLE_MPU */ +{ + uint8_t * pucStackMemory = NULL; + uint8_t * pucStackLimit; + uint32_t ulIPSR, ulSecureContextIndex; + SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID; + + #if ( configENABLE_MPU == 1 ) + uint32_t * pulCurrentStackPointer = NULL; + #endif /* configENABLE_MPU */ + + /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit + * Register (PSPLIM) value. */ + secureportREAD_IPSR( ulIPSR ); + secureportREAD_PSPLIM( pucStackLimit ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. + * Also do nothing, if a secure context us already loaded. PSPLIM is set to + * securecontextNO_STACK when no secure context is loaded. */ + if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) ) + { + /* Ontain a free secure context. */ + ulSecureContextIndex = ulGetSecureContext( pvTaskHandle ); + + /* Were we able to get a free context? */ + if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS ) + { + /* Allocate the stack space. 
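+             * The size requested from the secure heap is the caller's stack
+             * size plus securecontextSTACK_SEAL_SIZE bytes so that the two
+             * 32-bit seal words can be placed above the stack start.
+             * Resulting layout (the stack grows towards lower addresses):
+             *
+             *   pucStackLimit -> pucStackMemory
+             *   pucStackStart -> pucStackMemory + ulSecureStackSize
+             *   seal words    -> pucStackStart and pucStackStart + 4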
*/ + pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE ); + + if( pucStackMemory != NULL ) + { + /* Since stack grows down, the starting point will be the last + * location. Note that this location is next to the last + * allocated byte for stack (excluding the space for seal values) + * because the hardware decrements the stack pointer before + * writing i.e. if stack pointer is 0x2, a push operation will + * decrement the stack pointer to 0x1 and then write at 0x1. */ + xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize; + + /* Seal the created secure process stack. */ + *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE; + *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE; + + /* The stack cannot go beyond this location. This value is + * programmed in the PSPLIM register on context switch.*/ + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory; + + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle; + + #if ( configENABLE_MPU == 1 ) + { + /* Store the correct CONTROL value for the task on the stack. + * This value is programmed in the CONTROL register on + * context switch. */ + pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart; + pulCurrentStackPointer--; + + if( ulIsTaskPrivileged ) + { + *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED; + } + else + { + *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED; + } + + /* Store the current stack pointer. This value is programmed in + * the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer; + } + #else /* configENABLE_MPU */ + { + /* Current SP is set to the starting of the stack. This + * value programmed in the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart; + } + #endif /* configENABLE_MPU */ + + /* Ensure to never return 0 as a valid context handle. */ + xSecureContextHandle = ulSecureContextIndex + 1UL; + } + } + } + + return xSecureContextHandle; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint32_t ulIPSR, ulSecureContextIndex; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* Only free if a valid context handle is passed. */ + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + /* Ensure that the secure context being deleted is associated with + * the task. */ + if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) + { + /* Free the stack space. */ + vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit ); + + /* Return the secure context back to the free secure contexts pool. 
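+                 * vReturnSecureContext() simply resets all of the entry's
+                 * members to NULL, which is exactly the state that
+                 * ulGetSecureContext() recognises as a free slot.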
*/ + vReturnSecureContext( ulSecureContextIndex ); + } + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that no secure context is loaded and the task is loading it's + * own context. */ + if( ( pucStackLimit == securecontextNO_STACK ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that task's context is loaded and the task is saving it's own + * context. */ + if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85/secure/secure_context.h b/portable/IAR/ARM_CM85/secure/secure_context.h new file mode 100644 index 00000000000..57e390c264c --- /dev/null +++ b/portable/IAR/ARM_CM85/secure/secure_context.h @@ -0,0 +1,135 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_CONTEXT_H__ +#define __SECURE_CONTEXT_H__ + +/* Standard includes. */ +#include + +/* FreeRTOS includes. */ +#include "FreeRTOSConfig.h" + +/** + * @brief PSP value when no secure context is loaded. 
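+ *
+ * PSPLIM is set to this value whenever no secure context is loaded;
+ * SecureContext_AllocateContext() and SecureContext_LoadContext() compare
+ * PSPLIM against securecontextNO_STACK to detect whether a context is
+ * currently in use.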
+ */ +#define securecontextNO_STACK 0x0 + +/** + * @brief Invalid context ID. + */ +#define securecontextINVALID_CONTEXT_ID 0UL +/*-----------------------------------------------------------*/ + +/** + * @brief Structure to represent a secure context. + * + * @note Since stack grows down, pucStackStart is the highest address while + * pucStackLimit is the first address of the allocated memory. + */ +typedef struct SecureContext +{ + uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */ + uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */ + uint8_t * pucStackStart; /**< First location of the stack memory. */ + void * pvTaskHandle; /**< Task handle of the task this context is associated with. */ +} SecureContext_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Opaque handle for a secure context. + */ +typedef uint32_t SecureContextHandle_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Initializes the secure context management system. + * + * PSP is set to NULL and therefore a task must allocate and load a context + * before calling any secure side function in the thread mode. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureContext_Init( void ); + +/** + * @brief Allocates a context on the secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] ulSecureStackSize Size of the stack to allocate on secure side. + * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise. + * + * @return Opaque context handle if context is successfully allocated, NULL + * otherwise. + */ +#if ( configENABLE_MPU == 1 ) + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ); +#else /* configENABLE_MPU */ + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ); +#endif /* configENABLE_MPU */ + +/** + * @brief Frees the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the + * context to be freed. + */ +void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Loads the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be loaded. + */ +void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Saves the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be saved. 
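+ *
+ * Illustrative call sequence (a sketch only - the real calls are made from
+ * the non-secure side's context switch code, and the variable names used
+ * here are hypothetical):
+ *
+ *   SecureContext_SaveContext( xOutgoingTaskSecureContext, pvOutgoingTaskHandle );
+ *   SecureContext_LoadContext( xIncomingTaskSecureContext, pvIncomingTaskHandle );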
+ */ +void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +#endif /* __SECURE_CONTEXT_H__ */ diff --git a/portable/IAR/ARM_CM85/secure/secure_context_port_asm.s b/portable/IAR/ARM_CM85/secure/secure_context_port_asm.s new file mode 100644 index 00000000000..99240ca08c4 --- /dev/null +++ b/portable/IAR/ARM_CM85/secure/secure_context_port_asm.s @@ -0,0 +1,86 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + SECTION .text:CODE:NOROOT(2) + THUMB + +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ +#include "FreeRTOSConfig.h" + + PUBLIC SecureContext_LoadContextAsm + PUBLIC SecureContext_SaveContextAsm +/*-----------------------------------------------------------*/ + +SecureContext_LoadContextAsm: + /* pxSecureContext value is in r0. */ + mrs r1, ipsr /* r1 = IPSR. */ + cbz r1, load_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ + ldmia r0!, {r1, r2} /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */ + +#if ( configENABLE_MPU == 1 ) + ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */ + msr control, r3 /* CONTROL = r3. */ +#endif /* configENABLE_MPU */ + + msr psplim, r2 /* PSPLIM = r2. */ + msr psp, r1 /* PSP = r1. */ + + load_ctx_therad_mode: + bx lr +/*-----------------------------------------------------------*/ + +SecureContext_SaveContextAsm: + /* pxSecureContext value is in r0. */ + mrs r1, ipsr /* r1 = IPSR. */ + cbz r1, save_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ + mrs r1, psp /* r1 = PSP. */ + +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */ + vldmia r1!, {s0} /* Nullify the effect of the previous statement. 
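+                                            * The dummy store/load pair forces
+                                            * any lazily-deferred FP stacking
+                                            * to complete before the stack
+                                            * pointer is captured below.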
*/ +#endif /* configENABLE_FPU || configENABLE_MVE */ + +#if ( configENABLE_MPU == 1 ) + mrs r2, control /* r2 = CONTROL. */ + stmdb r1!, {r2} /* Store CONTROL value on the stack. */ +#endif /* configENABLE_MPU */ + + str r1, [r0] /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */ + movs r1, #0 /* r1 = securecontextNO_STACK. */ + msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */ + msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */ + + save_ctx_therad_mode: + bx lr +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM85/secure/secure_heap.c b/portable/IAR/ARM_CM85/secure/secure_heap.c new file mode 100644 index 00000000000..b3a7378188b --- /dev/null +++ b/portable/IAR/ARM_CM85/secure/secure_heap.c @@ -0,0 +1,451 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure context heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Total heap size. + */ +#ifndef secureconfigTOTAL_HEAP_SIZE + #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) ) +#endif + +/* No test marker by default. */ +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +/* No tracing by default. */ +#ifndef traceMALLOC + #define traceMALLOC( pvReturn, xWantedSize ) +#endif + +/* No tracing by default. */ +#ifndef traceFREE + #define traceFREE( pv, xBlockSize ) +#endif + +/* Block sizes must not get too small. */ +#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define secureheapBITS_PER_BYTE ( ( size_t ) 8 ) +/*-----------------------------------------------------------*/ + +/* Allocate the memory for the heap. */ +#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) + +/* The application writer has already defined the array used for the RTOS +* heap - probably so it can be placed in a special segment or address. 
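+* For example (illustrative only - the section name ".secure_heap" is an
+* assumption and must match the application's linker configuration):
+*
+*   #pragma location = ".secure_heap"
+*   uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];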
*/ + extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#else /* configAPPLICATION_ALLOCATED_HEAP */ + static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/** + * @brief The linked list structure. + * + * This is used to link free blocks in order of their memory address. + */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ +} BlockLink_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Called automatically to setup the required heap structures the first + * time pvPortMalloc() is called. + */ +static void prvHeapInit( void ); + +/** + * @brief Inserts a block of memory that is being freed into the correct + * position in the list of free memory blocks. + * + * The block being freed will be merged with the block in front it and/or the + * block behind it if the memory blocks are adjacent to each other. + * + * @param[in] pxBlockToInsert The block being freed. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ); +/*-----------------------------------------------------------*/ + +/** + * @brief The size of the structure placed at the beginning of each allocated + * memory block must by correctly byte aligned. + */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + +/** + * @brief Create a couple of list links to mark the start and end of the list. + */ +static BlockLink_t xStart, * pxEnd = NULL; + +/** + * @brief Keeps track of the number of free bytes remaining, but says nothing + * about fragmentation. + */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/** + * @brief Gets set to the top bit of an size_t type. + * + * When this bit in the xBlockSize member of an BlockLink_t structure is set + * then the block belongs to the application. When the bit is free the block is + * still part of the free heap space. + */ +static size_t xBlockAllocatedBit = 0; +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ + BlockLink_t * pxFirstFreeBlock; + uint8_t * pucAlignedHeap; + size_t uxAddress; + size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( secureportBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + * blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + * at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + * entire heap space, minus the space taken by pxEnd. 
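+     * After initialisation the free list therefore looks like:
+     *
+     *   xStart -> pxFirstFreeBlock (entire usable heap) -> pxEnd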
*/ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) +{ + BlockLink_t * pxIterator; + uint8_t * puc; + + /* Iterate through the list until a block is found that has a higher address + * than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + * before and the block after, then it's pxNextFreeBlock pointer will have + * already been set, and should not be set here as that would make it point + * to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +void * pvPortMalloc( size_t xWantedSize ) +{ + BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + void * pvReturn = NULL; + + /* If this is the first call to malloc then the heap will require + * initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is set. + * The top bit of the block size member of the BlockLink_t structure is used + * to determine who owns the block - the application or the kernel, so it + * must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + * structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number of + * bytes. */ + if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. 
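+                 * Worked example with 8-byte alignment: a request that has
+                 * grown to 30 bytes (including the BlockLink_t header) has
+                 * ( 30 & 0x0007 ) == 6, so 8 - 6 = 2 bytes are added and the
+                 * block becomes 32 bytes.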
*/ + xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) ); + secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + * one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size was + * not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + * BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + * of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + * two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + * block following the number of bytes requested. The void + * cast is used to prevent byte alignment warnings from the + * compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the single + * block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned by + * the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + + #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */ + + secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void * pv ) +{ + uint8_t * puc = ( uint8_t * ) pv; + BlockLink_t * pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + * before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. 
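+         * An allocated block has the top bit of xBlockSize set and a NULL
+         * pxNextFreeBlock - both were set by pvPortMalloc() just before the
+         * block was handed to the application.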
*/ + secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + secureportASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + * allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + secureportDISABLE_NON_SECURE_INTERRUPTS(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + secureportENABLE_NON_SECURE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85/secure/secure_heap.h b/portable/IAR/ARM_CM85/secure/secure_heap.h new file mode 100644 index 00000000000..bd42ff9ba69 --- /dev/null +++ b/portable/IAR/ARM_CM85/secure/secure_heap.h @@ -0,0 +1,66 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_HEAP_H__ +#define __SECURE_HEAP_H__ + +/* Standard includes. */ +#include + +/** + * @brief Allocates memory from heap. + * + * @param[in] xWantedSize The size of the memory to be allocated. + * + * @return Pointer to the memory region if the allocation is successful, NULL + * otherwise. + */ +void * pvPortMalloc( size_t xWantedSize ); + +/** + * @brief Frees the previously allocated memory. + * + * @param[in] pv Pointer to the memory to be freed. + */ +void vPortFree( void * pv ); + +/** + * @brief Get the free heap size. + * + * @return Free heap size. + */ +size_t xPortGetFreeHeapSize( void ); + +/** + * @brief Get the minimum ever free heap size. + * + * @return Minimum ever free heap size. 
+ */ +size_t xPortGetMinimumEverFreeHeapSize( void ); + +#endif /* __SECURE_HEAP_H__ */ diff --git a/portable/IAR/ARM_CM85/secure/secure_init.c b/portable/IAR/ARM_CM85/secure/secure_init.c new file mode 100644 index 00000000000..d91695bc0eb --- /dev/null +++ b/portable/IAR/ARM_CM85/secure/secure_init.c @@ -0,0 +1,106 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure init includes. */ +#include "secure_init.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Constants required to manipulate the SCB. + */ +#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */ +#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL ) +#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS ) +#define secureinitSCB_AIRCR_PRIS_POS ( 14UL ) +#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS ) + +/** + * @brief Constants required to manipulate the FPU. + */ +#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define secureinitFPCCR_LSPENS_POS ( 29UL ) +#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS ) +#define secureinitFPCCR_TS_POS ( 26UL ) +#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS ) + +#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */ +#define secureinitNSACR_CP10_POS ( 10UL ) +#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS ) +#define secureinitNSACR_CP11_POS ( 11UL ) +#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS ) +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. 
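+     * Otherwise AIRCR is written with the 0x05FA vector key and the PRIS bit
+     * set, which maps every non-secure exception priority into the lower
+     * (less urgent) half of the priority range so that secure exceptions
+     * always take precedence.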
*/ + if( ulIPSR != 0 ) + { + *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) | + ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) | + ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK ); + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is + * permitted. CP11 should be programmed to the same value as CP10. */ + *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK ); + + /* LSPENS = 0 ==> LSPEN is writable fron non-secure state. This ensures + * that we can enable/disable lazy stacking in port.c file. */ + *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK ); + + /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP + * registers (S16-S31) are also pushed to stack on exception entry and + * restored on exception return. */ + *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK ); + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85/secure/secure_init.h b/portable/IAR/ARM_CM85/secure/secure_init.h new file mode 100644 index 00000000000..e18ba44dafc --- /dev/null +++ b/portable/IAR/ARM_CM85/secure/secure_init.h @@ -0,0 +1,54 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_INIT_H__ +#define __SECURE_INIT_H__ + +/** + * @brief De-prioritizes the non-secure exceptions. + * + * This is needed to ensure that the non-secure PendSV runs at the lowest + * priority. Context switch is done in the non-secure PendSV handler. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_DePrioritizeNSExceptions( void ); + +/** + * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access. 
+ * + * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point + * Registers are not leaked to the non-secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_EnableNSFPUAccess( void ); + +#endif /* __SECURE_INIT_H__ */ diff --git a/portable/IAR/ARM_CM85/secure/secure_port_macros.h b/portable/IAR/ARM_CM85/secure/secure_port_macros.h new file mode 100644 index 00000000000..54990549a10 --- /dev/null +++ b/portable/IAR/ARM_CM85/secure/secure_port_macros.h @@ -0,0 +1,140 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_PORT_MACROS_H__ +#define __SECURE_PORT_MACROS_H__ + +/** + * @brief Byte alignment requirements. + */ +#define secureportBYTE_ALIGNMENT 8 +#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 ) + +/** + * @brief Macro to declare a function as non-secure callable. + */ +#if defined( __IAR_SYSTEMS_ICC__ ) + #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root +#else + #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) ) +#endif + +/** + * @brief Set the secure PRIMASK value. + */ +#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Set the non-secure PRIMASK value. + */ +#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Read the PSP value in the given variable. + */ +#define secureportREAD_PSP( pucOutCurrentStackPointer ) \ + __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) ) + +/** + * @brief Set the PSP to the given value. + */ +#define secureportSET_PSP( pucCurrentStackPointer ) \ + __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) ) + +/** + * @brief Read the PSPLIM value in the given variable. + */ +#define secureportREAD_PSPLIM( pucOutStackLimit ) \ + __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) ) + +/** + * @brief Set the PSPLIM to the given value. 
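+ *
+ * Writing 0 (securecontextNO_STACK) effectively disables the stack limit
+ * check and is used by the secure context code as the "no secure context
+ * loaded" marker.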
+ */ +#define secureportSET_PSPLIM( pucStackLimit ) \ + __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) ) + +/** + * @brief Set the NonSecure MSP to the given value. + */ +#define secureportSET_MSP_NS( pucMainStackPointer ) \ + __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) ) + +/** + * @brief Set the CONTROL register to the given value. + */ +#define secureportSET_CONTROL( ulControl ) \ + __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" ) + +/** + * @brief Read the Interrupt Program Status Register (IPSR) value in the given + * variable. + */ +#define secureportREAD_IPSR( ulIPSR ) \ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) ) + +/** + * @brief PRIMASK value to enable interrupts. + */ +#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0 + +/** + * @brief PRIMASK value to disable interrupts. + */ +#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1 + +/** + * @brief Disable secure interrupts. + */ +#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Disable non-secure interrupts. + * + * This effectively disables context switches. + */ +#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Enable non-secure interrupts. + */ +#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL ) + +/** + * @brief Assert definition. + */ +#define secureportASSERT( x ) \ + if( ( x ) == 0 ) \ + { \ + secureportDISABLE_SECURE_INTERRUPTS(); \ + secureportDISABLE_NON_SECURE_INTERRUPTS(); \ + for( ; ; ) {; } \ + } + +#endif /* __SECURE_PORT_MACROS_H__ */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c new file mode 100644 index 00000000000..d746923dfee --- /dev/null +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c @@ -0,0 +1,1203 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. 
*/ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + +/* The way the SysTick is clocked is not modified in case it is not the + * same a the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. 
*/ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. 
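+ *
+ * portINITIAL_XPSR below has only bit 24 (the Thumb state bit) set - the T
+ * bit must be set in the xPSR value placed on a new task's initial stack
+ * frame.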
+ */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. 
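+ *
+ * @note A minimal usage sketch (hypothetical caller - the variable
+ * xHigherPriorityTaskWoken is assumed and not part of this port): code that
+ * may run from either a task or an interrupt can pick the appropriate yield:
+ *
+ *     if( xPortIsInsideInterrupt() == pdTRUE )
+ *     {
+ *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ *     }
+ *     else
+ *     {
+ *         portYIELD();
+ *     }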
+ */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for is + * accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code will execute part way + * through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be un-suspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + * this tick period. 
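+             * SysTick counts down from the value in its LOAD register and
+             * raises its interrupt on reaching zero, so the count that remains
+             * is written to LOAD here to let the interrupted tick period
+             * complete before the normal one-tick reload value is reinstated
+             * below.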
*/ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + * periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation + * contains its own wait for interrupt or wait for event + * instruction, and so wfi should not be executed again. However, + * the original expected idle time variable must remain unmodified, + * so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will + * increase any slippage between the time maintained by the RTOS and + * calendar time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. + * Again, the time the SysTick is stopped for is accounted for as + * best it can be, but using the tickless mode will inevitably + * result in some tiny drift of the time maintained by the kernel + * with respect to calendar time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + * been set back to the current reload value (the reload back being + * correct for the entire expected idle time) or if the SysTick is + * yet to count to zero (in which case an interrupt other than the + * SysTick must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + * reloaded with ulReloadValue. Reset the + * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + * period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long. 
*/ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is + * stepped forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + * Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + * value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. 
ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. 
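+         * This region covers the FreeRTOS system call (MPU wrapper) entry
+         * points; vPortSVCHandler_C only honours portSVC_RAISE_PRIVILEGE when
+         * the caller's PC lies between __syscalls_flash_start__ and
+         * __syscalls_flash_end__.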
*/ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. */ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. 
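+             * Setting the PendSV set-pending bit defers the actual switch to
+             * the PendSV handler, which xPortStartScheduler configured to run
+             * at the lowest interrupt priority.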
*/ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. 
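+             * Requests that originate anywhere else are ignored, which
+             * prevents unprivileged application code from elevating itself by
+             * issuing the SVC directly.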
*/ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. */ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
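+         * The frame built below mirrors the hardware exception frame (xPSR,
+         * PC, LR, R12, R3-R0) followed by the software saved registers
+         * (R11-R4 and EXC_RETURN) and the port specific slots - CONTROL when
+         * the MPU is enabled, PSPLIM and, when TrustZone is enabled, the
+         * task's xSecureContext value.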
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. 
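+     * ulCriticalNesting is never 1000 at this point, so the configASSERT()
+     * below always fails and halts the application if assertions are enabled.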
*/ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. */ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. 
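+                 * RBAR takes the 32-byte aligned start address together with
+                 * the shareability, access permission and execute-never
+                 * attributes; RLAR (set further below) takes the 32-byte
+                 * aligned end address, the MAIR attribute index and the region
+                 * enable bit.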
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.h new file mode 100644 index 00000000000..b37f6d16a56 --- /dev/null +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s new file mode 100644 index 00000000000..9e9970cd40f --- /dev/null +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s @@ -0,0 +1,262 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
 + *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+contains code not understood by the assembler - for example the 'extern' keyword.
+To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+the code is included in C files but excluded by the preprocessor in assembly
+files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
+#include "FreeRTOSConfig.h"
+
+    EXTERN pxCurrentTCB
+    EXTERN vTaskSwitchContext
+    EXTERN vPortSVCHandler_C
+
+    PUBLIC xIsPrivileged
+    PUBLIC vResetPrivilege
+    PUBLIC vRestoreContextOfFirstTask
+    PUBLIC vRaisePrivilege
+    PUBLIC vStartFirstTask
+    PUBLIC ulSetInterruptMask
+    PUBLIC vClearInterruptMask
+    PUBLIC PendSV_Handler
+    PUBLIC SVC_Handler
+/*-----------------------------------------------------------*/
+
+/*---------------- Unprivileged Functions -------------------*/
+
+/*-----------------------------------------------------------*/
+
+    SECTION .text:CODE:NOROOT(2)
+    THUMB
+/*-----------------------------------------------------------*/
+
+xIsPrivileged:
+    mrs r0, control                            /* r0 = CONTROL. */
+    tst r0, #1                                 /* Perform r0 & 1 (bitwise AND) and update the condition flags. */
+    ite ne
+    movne r0, #0                               /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+    moveq r0, #1                               /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+    bx lr                                      /* Return. */
+/*-----------------------------------------------------------*/
+
+vResetPrivilege:
+    mrs r0, control                            /* r0 = CONTROL. */
+    orr r0, r0, #1                             /* r0 = r0 | 1. */
+    msr control, r0                            /* CONTROL = r0. */
+    bx lr                                      /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+/*----------------- Privileged Functions --------------------*/
+
+/*-----------------------------------------------------------*/
+
+    SECTION privileged_functions:CODE:NOROOT(2)
+    THUMB
+/*-----------------------------------------------------------*/
+
+vRestoreContextOfFirstTask:
+    ldr r2, =pxCurrentTCB                      /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+    ldr r1, [r2]                               /* Read pxCurrentTCB. */
+    ldr r0, [r1]                               /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+#if ( configENABLE_MPU == 1 )
+    dmb                                        /* Complete outstanding transfers before disabling MPU. 
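+                                                * The MPU stays disabled while the task's MAIR0 value and its four
+                                                * RBAR/RLAR pairs are copied from the TCB below (RNR is set to 4 so
+                                                * the RBAR/RLAR alias registers cover MPU regions 4 to 7), and is
+                                                * re-enabled before the first task's context is restored.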
*/ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r3, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r3, #4 /* r3 = 4. */ + str r3, [r2] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ + msr psplim, r1 /* Set this task's PSPLIM value. */ + msr control, r2 /* Set this task's CONTROL value. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r3 /* Finally, branch to EXC_RETURN. */ +#else /* configENABLE_MPU */ + ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + msr psplim, r1 /* Set this task's PSPLIM value. */ + movs r1, #2 /* r1 = 2. */ + msr CONTROL, r1 /* Switch to use PSP in the thread mode. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r2 /* Finally, branch to EXC_RETURN. */ +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +vRaisePrivilege: + mrs r0, control /* Read the CONTROL register. */ + bic r0, r0, #1 /* Clear the bit 0. */ + msr control, r0 /* Write back the new CONTROL value. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vStartFirstTask: + ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */ + ldr r0, [r0] /* The first entry in vector table is stack pointer. */ + msr msp, r0 /* Set the MSP back to the start of the stack. */ + cpsie i /* Globally enable interrupts. */ + cpsie f + dsb + isb + svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */ +/*-----------------------------------------------------------*/ + +ulSetInterruptMask: + mrs r0, basepri /* r0 = basepri. Return original basepri value. */ + mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vClearInterruptMask: + msr basepri, r0 /* basepri = ulMask. */ + dsb + isb + bx lr /* Return. 
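+                                                * basepri is set to whatever the caller passed in r0 - zero from
+                                                * portENABLE_INTERRUPTS(), or the previously returned mask from
+                                                * portCLEAR_INTERRUPT_MASK_FROM_ISR() - so masking nests correctly.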
 */
+/*-----------------------------------------------------------*/
+
+PendSV_Handler:
+    mrs r0, psp                                /* Read PSP in r0. */
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+    tst lr, #0x10                              /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+    it eq
+    vstmdbeq r0!, {s16-s31}                    /* Store the additional FP context registers which are not saved automatically. */
+#endif /* configENABLE_FPU || configENABLE_MVE */
+#if ( configENABLE_MPU == 1 )
+    mrs r1, psplim                             /* r1 = PSPLIM. */
+    mrs r2, control                            /* r2 = CONTROL. */
+    mov r3, lr                                 /* r3 = LR/EXC_RETURN. */
+    stmdb r0!, {r1-r11}                        /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
+#else /* configENABLE_MPU */
+    mrs r2, psplim                             /* r2 = PSPLIM. */
+    mov r3, lr                                 /* r3 = LR/EXC_RETURN. */
+    stmdb r0!, {r2-r11}                        /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
+#endif /* configENABLE_MPU */
+
+    ldr r2, =pxCurrentTCB                      /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+    ldr r1, [r2]                               /* Read pxCurrentTCB. */
+    str r0, [r1]                               /* Save the new top of stack in TCB. */
+
+    mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+    msr basepri, r0                            /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+    dsb
+    isb
+    bl vTaskSwitchContext
+    mov r0, #0                                 /* r0 = 0. */
+    msr basepri, r0                            /* Enable interrupts. */
+
+    ldr r2, =pxCurrentTCB                      /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+    ldr r1, [r2]                               /* Read pxCurrentTCB. */
+    ldr r0, [r1]                               /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+
+#if ( configENABLE_MPU == 1 )
+    dmb                                        /* Complete outstanding transfers before disabling MPU. */
+    ldr r2, =0xe000ed94                        /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
+    ldr r4, [r2]                               /* Read the value of MPU_CTRL. */
+    bic r4, r4, #1                             /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
+    str r4, [r2]                               /* Disable MPU. */
+
+    adds r1, #4                                /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
+    ldr r3, [r1]                               /* r3 = *r1 i.e. r3 = MAIR0. */
+    ldr r2, =0xe000edc0                        /* r2 = 0xe000edc0 [Location of MAIR0]. */
+    str r3, [r2]                               /* Program MAIR0. */
+    ldr r2, =0xe000ed98                        /* r2 = 0xe000ed98 [Location of RNR]. */
+    movs r3, #4                                /* r3 = 4. */
+    str r3, [r2]                               /* Program RNR = 4. */
+    adds r1, #4                                /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
+    ldr r2, =0xe000ed9c                        /* r2 = 0xe000ed9c [Location of RBAR]. */
+    ldmia r1!, {r4-r11}                        /* Read 4 sets of RBAR/RLAR registers from TCB. */
+    stmia r2!, {r4-r11}                        /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+    ldr r2, =0xe000ed94                        /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
+    ldr r4, [r2]                               /* Read the value of MPU_CTRL. */
+    orr r4, r4, #1                             /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
+    str r4, [r2]                               /* Enable MPU. */
+    dsb                                        /* Force memory writes before continuing. */
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+    ldmia r0!, {r1-r11}                        /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
+#else /* configENABLE_MPU */
+    ldmia r0!, {r2-r11}                        /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
+#endif /* configENABLE_MPU */
+
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+    tst r3, #0x10                              /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+    it eq
+    vldmiaeq r0!, {s16-s31}                    /* Restore the additional FP context registers which are not restored automatically. 
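+                                                * Bit[4] of EXC_RETURN is zero when the task was using the extended
+                                                * (FP) stack frame, in which case s16-s31 were saved on the way into
+                                                * the switch and are reloaded here before the rest of the frame is
+                                                * unstacked.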
*/ +#endif /* configENABLE_FPU || configENABLE_MVE */ + + #if ( configENABLE_MPU == 1 ) + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + msr control, r2 /* Restore the CONTROL register value for the task. */ +#else /* configENABLE_MPU */ + msr psplim, r2 /* Restore the PSPLIM register value for the task. */ +#endif /* configENABLE_MPU */ + msr psp, r0 /* Remember the new top of stack for the task. */ + bx r3 +/*-----------------------------------------------------------*/ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h new file mode 100644 index 00000000000..cfaae813eac --- /dev/null +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h @@ -0,0 +1,83 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +#ifndef configENABLE_MVE + #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE. +#endif /* configENABLE_MVE */ +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M85" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. 
+ */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..4b1f99bf352 --- /dev/null +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,311 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. 
+ */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/ThirdParty/GCC/ARM_CM33_TFM/README.md b/portable/ThirdParty/GCC/ARM_TFM/README.md similarity index 77% rename from portable/ThirdParty/GCC/ARM_CM33_TFM/README.md rename to portable/ThirdParty/GCC/ARM_TFM/README.md index ba62d14a133..beb494b19b0 100644 --- a/portable/ThirdParty/GCC/ARM_CM33_TFM/README.md +++ b/portable/ThirdParty/GCC/ARM_TFM/README.md @@ -2,7 +2,8 @@ This port adds the support that FreeRTOS applications can call the secure services in Trusted Firmware M(TF-M) through Platform Security Architecture -(PSA) API based on the ARM Cortex-M33 platform. +(PSA) API based on the ARM Cortex-M23, Cortex-M33, Cortex-M55 and Cortex-M85 +platform. The Platform Security Architecture (PSA) makes it quicker, easier and cheaper to design security into a device from the ground up. PSA is made up of four key @@ -36,7 +37,7 @@ _**Note:** ```TFM_NS_MANAGE_NSID``` must be configured as "OFF" when building TF ## Build the Non-Secure Side -Please copy all the files in ```freertos_kernel\portable\GCC\ARM_CM33_NTZ``` into the ```freertos_kernel\portable\ThirdParty\GCC\ARM_CM33_TFM``` folder before using this port. Note that TrustZone is enabled in this port. The TF-M runs in the Secure Side. 
+Please copy all the files in ```freertos_kernel\portable\GCC\ARM_CM[23|33|55|85]_NTZ``` into the ```freertos_kernel\portable\ThirdParty\GCC\ARM_TFM``` folder before using this port. Note that TrustZone is enabled in this port. The TF-M runs in the Secure Side. Please call the API ```tfm_ns_interface_init()``` which is defined in ```\app\tfm_ns_interface.c``` by tf-m-tests (tag: TF-Mv1.5.0 & TF-Mv1.6.0) at the very beginning of your application. Otherwise, it will always fail when calling a TF-M service in the Nonsecure Side. @@ -50,6 +51,14 @@ Kernel runs in the Non-Secure Side. * ```configENABLE_FPU``` The setting of this macro is decided by the setting in Secure Side which is platform-specific. If the Secure Side enables Non-Secure access to FPU, then this macro can be configured as 0 or 1. Otherwise, this macro can only be configured as 0. +Please note that Cortex-M23 does not support FPU. +Please refer to [TF-M documentation](https://tf-m-user-guide.trustedfirmware.org/integration_guide/tfm_fpu_support.html) for FPU usage on the Non-Secure side. + +* ```configENABLE_MVE``` +The setting of this macro is decided by the setting in Secure Side which is platform-specific. +If the Secure Side enables Non-Secure access to MVE, then this macro can be configured as 0 or 1. Otherwise, this macro can only be configured as 0. +Please note that only Cortex-M55 and Cortex-M85 support MVE. +Please refer to [TF-M documentation](https://tf-m-user-guide.trustedfirmware.org/integration_guide/tfm_fpu_support.html) for MVE usage on the Non-Secure side. * ```configENABLE_TRUSTZONE``` This macro should be configured as 0 because TF-M doesn't use the secure context management function of FreeRTOS. New secure context management might be introduced when TF-M supports multiple secure context. diff --git a/portable/ThirdParty/GCC/ARM_CM33_TFM/os_wrapper_freertos.c b/portable/ThirdParty/GCC/ARM_TFM/os_wrapper_freertos.c similarity index 100% rename from portable/ThirdParty/GCC/ARM_CM33_TFM/os_wrapper_freertos.c rename to portable/ThirdParty/GCC/ARM_TFM/os_wrapper_freertos.c From a3d4a1d36a2e74d4ebfb00bba85a364430805a1d Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Tue, 13 Sep 2022 23:43:10 -0700 Subject: [PATCH 089/164] portable-RP2040: Fix typo in README.md (#559) Replace "import" with "include" in cmake code sample. 
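The ARM_TFM README above leaves the exact values of the configENABLE_* macros to the integrator. As a purely illustrative sketch, a non-secure FreeRTOSConfig.h fragment for a Cortex-M85 build running alongside TF-M might contain something like the following; the FPU/MVE values are assumptions and must mirror whatever access the Secure Side actually grants:

```c
/* Illustrative fragment only - not part of any port. The values are
 * assumptions that must match the Secure Side (TF-M) configuration. */
#define configENABLE_TRUSTZONE    0    /* TF-M owns secure context management. */
#define configENABLE_MPU          0    /* Set to 1 only for an MPU-enabled configuration. */
#define configENABLE_FPU          1    /* Only if the Secure Side enables Non-Secure FPU access. */
#define configENABLE_MVE          1    /* Cortex-M55/M85 only, and only if Non-Secure MVE access is enabled. */
```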
--- portable/ThirdParty/GCC/RP2040/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/README.md b/portable/ThirdParty/GCC/RP2040/README.md index 5a0fe4d4326..c50cb4e388c 100644 --- a/portable/ThirdParty/GCC/RP2040/README.md +++ b/portable/ThirdParty/GCC/RP2040/README.md @@ -14,7 +14,7 @@ You can copy [FreeRTOS-Kernel-import.cmake](FreeRTOS-Kernel-import.cmake) into y add the following in your `CMakeLists.txt`: ```cmake -import(FreeRTOS_Kernel_import.cmake) +include(FreeRTOS_Kernel_import.cmake) ``` This will locate the FreeRTOS kernel if it is a direct sub-module of your project, or if you provide the @@ -39,4 +39,4 @@ Some additional `config` options are defined [here](include/rp2040_config.h) whi ## Known Limitations -- Tickless idle has not currently been tested, and is likely non-functional \ No newline at end of file +- Tickless idle has not currently been tested, and is likely non-functional From 426b12051807b9f84e83e4bed6941aada1b9914d Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Fri, 16 Sep 2022 00:00:11 -0700 Subject: [PATCH 090/164] Update CMakeLists.txt for Cortex-M55 and Cortex-M85 ports (#560) * Annotate ports CMakeLists.txt with port details * CMake: Add Cortex-M55 and Cortex-M85 ports --- CMakeLists.txt | 16 +- portable/CMakeLists.txt | 545 +++++++++++++++++++++++++++++++++++----- 2 files changed, 499 insertions(+), 62 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 86e149c0275..5b048d224c5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -62,6 +62,15 @@ if(NOT FREERTOS_PORT) " GCC_ARM_CM33_NONSECURE - Compiller: GCC Target: ARM Cortex-M33 non-secure\n" " GCC_ARM_CM33_SECURE - Compiller: GCC Target: ARM Cortex-M33 secure\n" " GCC_ARM_CM33_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M33 non-trustzone non-secure\n" + " GCC_ARM_CM33_TFM - Compiller: GCC Target: ARM Cortex-M33 non-secure for TF-M\n" + " GCC_ARM_CM55_NONSECURE - Compiller: GCC Target: ARM Cortex-M55 non-secure\n" + " GCC_ARM_CM55_SECURE - Compiller: GCC Target: ARM Cortex-M55 secure\n" + " GCC_ARM_CM55_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M55 non-trustzone non-secure\n" + " GCC_ARM_CM55_TFM - Compiller: GCC Target: ARM Cortex-M55 non-secure for TF-M\n" + " GCC_ARM_CM85_NONSECURE - Compiller: GCC Target: ARM Cortex-M85 non-secure\n" + " GCC_ARM_CM85_SECURE - Compiller: GCC Target: ARM Cortex-M85 secure\n" + " GCC_ARM_CM85_NTZ_NONSECURE - Compiller: GCC Target: ARM Cortex-M85 non-trustzone non-secure\n" + " GCC_ARM_CM85_TFM - Compiller: GCC Target: ARM Cortex-M85 non-secure for TF-M\n" " GCC_ARM_CR5 - Compiller: GCC Target: ARM Cortex-R5\n" " GCC_ARM_CRX_NOGIC - Compiller: GCC Target: ARM Cortex-Rx no GIC\n" " GCC_ARM7_AT91FR40008 - Compiller: GCC Target: ARM7 Atmel AT91R40008\n" @@ -94,7 +103,6 @@ if(NOT FREERTOS_PORT) " GCC_TRICORE_1782 - Compiller: GCC Target: TriCore 1782\n" " GCC_ARC_EM_HS - Compiller: GCC Target: DesignWare ARC EM HS\n" " GCC_ARC_V1 - Compiller: GCC Target: DesignWare ARC v1\n" - " GCC_ARM_CM33_TFM - Compiller: GCC Target: ARM Cortex-M33 trusted firmware\n" " GCC_ATMEGA - Compiller: GCC Target: ATmega\n" " GCC_POSIX - Compiller: GCC Target: Posix\n" " GCC_RP2040 - Compiller: GCC Target: RP2040 ARM Cortex-M0+\n" @@ -115,6 +123,12 @@ if(NOT FREERTOS_PORT) " IAR_ARM_CM33_NONSECURE - Compiller: IAR Target: ARM Cortex-M33 non-secure\n" " IAR_ARM_CM33_SECURE - Compiller: IAR Target: ARM Cortex-M33 secure\n" " IAR_ARM_CM33_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M33 non-trustzone 
non-secure\n" + " IAR_ARM_CM55_NONSECURE - Compiller: IAR Target: ARM Cortex-M55 non-secure\n" + " IAR_ARM_CM55_SECURE - Compiller: IAR Target: ARM Cortex-M55 secure\n" + " IAR_ARM_CM55_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M55 non-trustzone non-secure\n" + " IAR_ARM_CM85_NONSECURE - Compiller: IAR Target: ARM Cortex-M85 non-secure\n" + " IAR_ARM_CM85_SECURE - Compiller: IAR Target: ARM Cortex-M85 secure\n" + " IAR_ARM_CM85_NTZ_NONSECURE - Compiller: IAR Target: ARM Cortex-M85 non-trustzone non-secure\n" " IAR_ARM_CRX_NOGIC - Compiller: IAR Target: ARM Cortex-Rx no GIC\n" " IAR_ATMEGA323 - Compiller: IAR Target: ATMega323\n" " IAR_ATMEL_SAM7S64 - Compiller: IAR Target: Atmel SAM7S64\n" diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index 9848adaf4f7..ea54ec4231f 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -1,177 +1,316 @@ # FreeRTOS internal cmake file. Do not use it in user top-level project add_library(freertos_kernel_port STATIC + # 16-Bit DOS ports for BCC $<$: BCC/16BitDOS/common/portcomn.c BCC/16BitDOS/Flsh186/port.c> + $<$: BCC/16BitDOS/common/portcomn.c BCC/16BitDOS/PC/port.c> + + # ARMv7-M port for Texas Instruments Code Composer Studio $<$: CCS/ARM_CM3/port.c CCS/ARM_CM3/portasm.asm> + + # ARMv7E-M port for Texas Instruments Code Composer Studio $<$: CCS/ARM_CM4F/port.c CCS/ARM_CM4F/portasm.asm> + + # ARMv7-R port for Texas Instruments Code Composer Studio $<$: CCS/ARM_Cortex-R4/port.c CCS/ARM_Cortex-R4/portASM.asm> + + # Texas Instruments MSP430 port for Texas Instruments Code Composer Studio $<$: CCS/MSP430X/port.c CCS/MSP430X/portext.asm> + + # NXP (formerly Motorola, Freescale) Cold Fire and 68HCS12 ports for Code Warrior $<$: CodeWarrior/ColdFire_V1/port.c CodeWarrior/ColdFire_V1/portasm.S> + $<$: CodeWarrior/ColdFire_V2/port.c CodeWarrior/ColdFire_V2/portasm.S> + $<$: CodeWarrior/HCS12/port.c> + + # ARMv7-A port for GCC $<$: GCC/ARM_CA9/port.c GCC/ARM_CA9/portASM.S> + + # ARMv8-A ports for GCC $<$: GCC/ARM_CA53_64_BIT/port.c GCC/ARM_CA53_64_BIT/portASM.S> + $<$: GCC/ARM_CA53_64_BIT_SRE/port.c GCC/ARM_CA53_64_BIT_SRE/portASM.S> + + # ARMv6-M port for GCC $<$: GCC/ARM_CM0/port.c> + + # ARMv6-M / Cortex-M0 Raspberry PI RP2040 port for GCC + $<$: + ThirdParty/GCC/RP2040/idle_task_static_memory.c + ThirdParty/GCC/RP2040/port.c> + + # ARMv7-M ports for GCC $<$: GCC/ARM_CM3/port.c> + $<$: GCC/ARM_CM3_MPU/port.c> + + # ARMv7E-M ports for GCC $<$: GCC/ARM_CM4_MPU/port.c> + $<$: GCC/ARM_CM4F/port.c> + $<$: GCC/ARM_CM7/r0p1/port.c> + + # ARMv8-M ports for GCC $<$: GCC/ARM_CM23/non_secure/port.c GCC/ARM_CM23/non_secure/portasm.c> + $<$: GCC/ARM_CM23/secure/secure_context_port.c GCC/ARM_CM23/secure/secure_context.c GCC/ARM_CM23/secure/secure_heap.c GCC/ARM_CM23/secure/secure_init.c> + $<$: GCC/ARM_CM23_NTZ/non_secure/port.c GCC/ARM_CM23_NTZ/non_secure/portasm.c> + $<$: GCC/ARM_CM33/non_secure/port.c GCC/ARM_CM33/non_secure/portasm.c> + $<$: GCC/ARM_CM33/secure/secure_context_port.c GCC/ARM_CM33/secure/secure_context.c GCC/ARM_CM33/secure/secure_heap.c GCC/ARM_CM33/secure/secure_init.c> + $<$: GCC/ARM_CM33_NTZ/non_secure/port.c GCC/ARM_CM33_NTZ/non_secure/portasm.c> + + $<$: + GCC/ARM_CM33_NTZ/non_secure/port.c + GCC/ARM_CM33_NTZ/non_secure/portasm.c + ThirdParty/GCC/ARM_TFM/os_wrapper_freertos.c> + + # ARMv8.1-M ports for GCC + $<$: + GCC/ARM_CM55/non_secure/port.c + GCC/ARM_CM55/non_secure/portasm.c> + + $<$: + GCC/ARM_CM55/secure/secure_context_port.c + GCC/ARM_CM55/secure/secure_context.c + GCC/ARM_CM55/secure/secure_heap.c + 
GCC/ARM_CM55/secure/secure_init.c> + + $<$: + GCC/ARM_CM55_NTZ/non_secure/port.c + GCC/ARM_CM55_NTZ/non_secure/portasm.c> + + $<$: + GCC/ARM_CM55_NTZ/non_secure/port.c + GCC/ARM_CM55_NTZ/non_secure/portasm.c + ThirdParty/GCC/ARM_TFM/os_wrapper_freertos.c> + + $<$: + GCC/ARM_CM85/non_secure/port.c + GCC/ARM_CM85/non_secure/portasm.c> + + $<$: + GCC/ARM_CM85/secure/secure_context_port.c + GCC/ARM_CM85/secure/secure_context.c + GCC/ARM_CM85/secure/secure_heap.c + GCC/ARM_CM85/secure/secure_init.c> + + $<$: + GCC/ARM_CM85_NTZ/non_secure/port.c + GCC/ARM_CM85_NTZ/non_secure/portasm.c> + + $<$: + GCC/ARM_CM85_NTZ/non_secure/port.c + GCC/ARM_CM85_NTZ/non_secure/portasm.c + ThirdParty/GCC/ARM_TFM/os_wrapper_freertos.c> + + # ARMv7-R ports for GCC $<$: GCC/ARM_CR5/port.c GCC/ARM_CR5/portASM.S> + $<$: GCC/ARM_CRx_No_GIC/port.c GCC/ARM_CRx_No_GIC/portASM.S> + + # ARMv4T ARM7TDMI ports for GCC $<$: GCC/ARM7_AT91FR40008/port.c GCC/ARM7_AT91FR40008/portISR.c> + $<$: GCC/ARM7_AT91SAM7S/lib_AT91SAM7X256.c GCC/ARM7_AT91SAM7S/port.c GCC/ARM7_AT91SAM7S/portISR.c> + $<$: GCC/ARM7_LPC2000/port.c GCC/ARM7_LPC2000/portISR.c> + $<$: GCC/ARM7_LPC23xx/port.c GCC/ARM7_LPC23xx/portISR.c> + + $<$: + GCC/STR75x/port.c + GCC/STR75x/portISR.c> + + # Microchip (formerly Ateml) AVR8 ports for GCC $<$: GCC/ATMega323/port.c> + + $<$: + ThirdParty/GCC/ATmega/port.c> + + $<$: + ThirdParty/Partner-Supported-Ports/GCC/AVR_AVRDx/port.c> + + $<$: + ThirdParty/Partner-Supported-Ports/GCC/AVR_Mega0/port.c> + + # Microchip (formerly Ateml) AVR32 port for GCC $<$: GCC/AVR32_UC3/exception.S GCC/AVR32_UC3/port.c> + + # NXP (formerly Motorola, Freescale) Cold Fire and 68HCS12 ports for GCC $<$: GCC/ColdFire_V2/port.c GCC/ColdFire_V2/portasm.S> + + $<$: + GCC/HCS12/port.c> + + # Cortus APS3 soft core port for GCC $<$: GCC/CORTUS_APS3/port.c> + + # Renesas (formerly Hitach) H8S port for GCC $<$: GCC/H8S2329/port.c> - $<$: - GCC/HCS12/port.c> + + # x86 / IA32 flat memory model port for GCC $<$: GCC/IA32_flat/port.c GCC/IA32_flat/portASM.S> + + # Xilinx MicroBlaze soft core ports for GCC $<$: GCC/MicroBlaze/port.c GCC/MicroBlaze/portasm.s> + $<$: GCC/MicroBlazeV8/port.c GCC/MicroBlazeV8/port_exceptions.c GCC/MicroBlazeV8/portasm.S> + $<$: GCC/MicroBlazeV9/port.c GCC/MicroBlazeV9/port_exceptions.c GCC/MicroBlazeV9/portasm.S> - $<$: - GCC/MSP430F449/port.c> - $<$: - GCC/NiosII/port.c - GCC/NiosII/port_asm.S> + + # Xilinx PCC4XX soft core ports for GCC $<$: GCC/PPC405_Xilinx/port.c GCC/PPC405_Xilinx/portasm.S> + $<$: GCC/PPC440_Xilinx/port.c GCC/PPC440_Xilinx/portasm.S> + + # Texas Instruments MSP430 port for GCC + $<$: + GCC/MSP430F449/port.c> + + # Intel (formerly Altera) NIOS II soft core port for GCC + $<$: + GCC/NiosII/port.c + GCC/NiosII/port_asm.S> + + # RISC-V architecture ports for GCC $<$: GCC/RISC-V/port.c GCC/RISC-V/portASM.S> + $<$: GCC/RISC-V/port.c GCC/RISC-V/portASM.S> + + # Renesas RL78 port for GCC $<$: GCC/RL78/port.c GCC/RL78/portasm.S> + + # Renesas RX architecture ports for GCC $<$: GCC/RX100/port.c> + $<$: GCC/RX200/port.c> + $<$: GCC/RX600/port.c> + $<$: GCC/RX600v2/port.c> + $<$: GCC/RX700v3_DPFPU/port.c> - $<$: - GCC/STR75x/port.c - GCC/STR75x/portISR.c> + + # Infineon TriCore 1782 port for GCC $<$: GCC/TriCore_1782/port.c GCC/TriCore_1782/porttrap.c> + + # Synopsys ARC architecture ports for GCC $<$: ThirdParty/GCC/ARC_EM_HS/arc_freertos_exceptions.c ThirdParty/GCC/ARC_EM_HS/arc_support.s ThirdParty/GCC/ARC_EM_HS/freertos_tls.c ThirdParty/GCC/ARC_EM_HS/port.c> + $<$: ThirdParty/GCC/ARC_v1/arc_freertos_exceptions.c 
ThirdParty/GCC/ARC_v1/arc_support.s ThirdParty/GCC/ARC_v1/port.c> - $<$: - ThirdParty/GCC/ARM_CM33_TFM/os_wrapper_freertos.c> - $<$: - ThirdParty/GCC/ATmega/port.c> + + # Posix Simulator port for GCC $<$: ThirdParty/GCC/Posix/port.c ThirdParty/GCC/Posix/utils/wait_for_event.c> - $<$: - ThirdParty/GCC/RP2040/idle_task_static_memory.c - ThirdParty/GCC/RP2040/port.c> + + # Xtensa LX / Espressif ESP32 port for GCC $<$: ThirdParty/GCC/Xtensa_ESP32/FreeRTOS-openocd.c ThirdParty/GCC/Xtensa_ESP32/port.c @@ -184,199 +323,325 @@ add_library(freertos_kernel_port STATIC ThirdParty/GCC/Xtensa_ESP32/xtensa_overlay_os_hook.c ThirdParty/GCC/Xtensa_ESP32/xtensa_vector_defaults.S ThirdParty/GCC/Xtensa_ESP32/xtensa_vectors.S> - $<$: - ThirdParty/Partner-Supported-Ports/GCC/AVR_AVRDx/port.c> - $<$: - ThirdParty/Partner-Supported-Ports/GCC/AVR_Mega0/port.c> + + # Renesas (formerly NEC) 78K port for IAR EW78K $<$: IAR/78K0R/port.c IAR/78K0R/portasm.s26> + + # ARMv7-A ports for IAR EWARM $<$: IAR/ARM_CA5_No_GIC/port.c IAR/ARM_CA5_No_GIC/portASM.s> + $<$: IAR/ARM_CA9/port.c IAR/ARM_CA9/portASM.s> + + # ARMv6-M port for IAR EWARM $<$: IAR/ARM_CM0/port.c IAR/ARM_CM0/portasm.s> + + # ARMv7-M port for IAR EWARM $<$: IAR/ARM_CM3/port.c IAR/ARM_CM3/portasm.s> + + # ARMv7E-M ports for IAR EWARM $<$: IAR/ARM_CM4F/port.c IAR/ARM_CM4F/portasm.s> + $<$: IAR/ARM_CM4F_MPU/port.c IAR/ARM_CM4F_MPU/portasm.s> + $<$: IAR/ARM_CM7/r0p1/port.c IAR/ARM_CM7/r0p1/portasm.s> + + # ARMv8-M Ports for IAR EWARM $<$: IAR/ARM_CM23/non_secure/port.c IAR/ARM_CM23/non_secure/portasm.s> + $<$: IAR/ARM_CM23/secure/secure_context_port_asm.s IAR/ARM_CM23/secure/secure_context.c IAR/ARM_CM23/secure/secure_heap.c IAR/ARM_CM23/secure/secure_init.c> + $<$: IAR/ARM_CM23_NTZ/non_secure/port.c IAR/ARM_CM23_NTZ/non_secure/portasm.s> + $<$: IAR/ARM_CM33/non_secure/port.c IAR/ARM_CM33/non_secure/portasm.s> + $<$: IAR/ARM_CM33/secure/secure_context_port_asm.s IAR/ARM_CM33/secure/secure_context.c IAR/ARM_CM33/secure/secure_heap.c IAR/ARM_CM33/secure/secure_init.c> + $<$: IAR/ARM_CM33_NTZ/non_secure/port.c IAR/ARM_CM33_NTZ/non_secure/portasm.s> + + # ARMv8.1-M ports for IAR EWARM + $<$: + IAR/ARM_CM55/non_secure/port.c + IAR/ARM_CM55/non_secure/portasm.s> + + $<$: + IAR/ARM_CM55/secure/secure_context_port_asm.s + IAR/ARM_CM55/secure/secure_context.c + IAR/ARM_CM55/secure/secure_heap.c + IAR/ARM_CM55/secure/secure_init.c> + + $<$: + IAR/ARM_CM55_NTZ/non_secure/port.c + IAR/ARM_CM55_NTZ/non_secure/portasm.s> + + $<$: + IAR/ARM_CM85/non_secure/port.c + IAR/ARM_CM85/non_secure/portasm.s> + + $<$: + IAR/ARM_CM85/secure/secure_context_port_asm.s + IAR/ARM_CM85/secure/secure_context.c + IAR/ARM_CM85/secure/secure_heap.c + IAR/ARM_CM85/secure/secure_init.c> + + $<$: + IAR/ARM_CM85_NTZ/non_secure/port.c + IAR/ARM_CM85_NTZ/non_secure/portasm.s> + + # ARMv7-R Ports for IAR EWARM $<$: IAR/ARM_CRx_No_GIC/port.c IAR/ARM_CRx_No_GIC/portASM.s> + + # Microchip (formerly Atmel) AVR8 ports for IAR EWAVR $<$: IAR/ATMega323/port.c IAR/ATMega323/portmacro.s90> - $<$: - IAR/AtmelSAM7S64/port.c - IAR/AtmelSAM7S64/portasm.s79> - $<$: - IAR/AtmelSAM9XE/port.c - IAR/AtmelSAM9XE/portasm.s79> + $<$: IAR/AVR_AVRDx/port.c IAR/AVR_AVRDx/portmacro.s90> + $<$: IAR/AVR_Mega0/port.c IAR/AVR_Mega0/portmacro.s90> + + # Microchip (formerly Atmel) AVR32 port for IAR Embedded Workbench for AVR32 $<$: IAR/AVR32_UC3/exception.s82 IAR/AVR32_UC3/port.c IAR/AVR32_UC3/read.c IAR/AVR32_UC3/write.c> - $<$: - IAR/LPC2000/port.c - IAR/LPC2000/portasm.s79> + + # Texas Instruments MSP430 ports for IAR Embedded 
Workbench for MSP430 $<$: IAR/MSP430/port.c IAR/MSP430/portext.s43> + $<$: IAR/MSP430X/port.c IAR/MSP430X/portext.s43> + + # RISC-V architecture port for IAR Embedded Workbench for RISC-V $<$: IAR/RISC-V/port.c IAR/RISC-V/portASM.s> + + # Renesas RL78 port for IAR EWRL78 $<$: IAR/RL78/port.c IAR/RL78/portasm.s87> + + # Renesas RX architecture ports for IAR EWRX $<$: IAR/RX100/port.c IAR/RX100/port_asm.s> + $<$: IAR/RX600/port.c IAR/RX600/port_asm.s> + $<$: IAR/RX700v3_DPFPU/port.c> + $<$: IAR/RXv2/port.c IAR/RXv2/port_asm.s> + + # Renesas (formerly NEC) V850ES port for IAR EWV850 + $<$: + IAR/V850ES/port.c + IAR/V850ES/portasm_Fx3.s85> + + $<$: + IAR/V850ES/port.c + IAR/V850ES/portasm_Hx2.s85> + + # ARMv4T ARM7TDMI ports for IAR Embedded Workbench for ARM $<$: IAR/STR71x/port.c IAR/STR71x/portasm.s79> + $<$: IAR/STR75x/port.c IAR/STR75x/portasm.s79> + + $<$: + IAR/LPC2000/port.c + IAR/LPC2000/portasm.s79> + + $<$: + IAR/AtmelSAM7S64/port.c + IAR/AtmelSAM7S64/portasm.s79> + + # ARMv5TE ARM926 ports for IAR Embedded Workbench for ARM $<$: IAR/STR91x/port.c IAR/STR91x/portasm.s79> - $<$: - IAR/V850ES/port.c - IAR/V850ES/portasm_Fx3.s85> - $<$: - IAR/V850ES/port.c - IAR/V850ES/portasm_Hx2.s85> + + $<$: + IAR/AtmelSAM9XE/port.c + IAR/AtmelSAM9XE/portasm.s79> + + # ARM Cortex-M4F port for the MikroElektronika MikroC compiler $<$: MikroC/ARM_CM4F/port.c> + + # Microchip PIC18 8-bit MCU port for MPLAB XC8 $<$: MPLAB/PIC18F/port.c> + + # Microchip PIC24 16-bit MCU port for MPLAB XC16 $<$: MPLAB/PIC24_dsPIC/port.c MPLAB/PIC24_dsPIC/portasm_PIC24.S> # TODO: What to do with portasm_dsPIC.S ? + + # Microchip MIPS 32-Bit MCU ports for MPLAB XC32 $<$: MPLAB/PIC32MEC14xx/port.c MPLAB/PIC32MEC14xx/port_asm.S> + $<$: MPLAB/PIC32MX/port.c MPLAB/PIC32MX/port_asm.S> + $<$: MPLAB/PIC32MZ/port.c MPLAB/PIC32MZ/port_asm.S> + + # Windows Simulator for Microsoft Visual C Compiler and MinGW GCC $<$: MSVC-MingW/port.c> + + # 16 bit DOS ports for Open Watcom $<$: oWatcom/16BitDOS/common/portcomn.c oWatcom/16BitDOS/Flsh186/port.c> + $<$: oWatcom/16BitDOS/common/portcomn.c oWatcom/16BitDOS/PC/port.c> + $<$: Paradigm/Tern_EE/large_untested/port.c> + $<$: Paradigm/Tern_EE/small/port.c> + + # Renesas RX mcu ports for Renesas CC-RX $<$: Renesas/RX100/port.c Renesas/RX100/port_asm.src> + $<$: Renesas/RX200/port.c Renesas/RX200/port_asm.src> + $<$: Renesas/RX600/port.c Renesas/RX600/port_asm.src> + $<$: Renesas/RX600v2/port.c Renesas/RX600v2/port_asm.src> + $<$: Renesas/RX700v3_DPFPU/port.c Renesas/RX700v3_DPFPU/port_asm.src> + + # Renesas (formerly Hitach) SHA2 SuperH port for the Renesas SH C Compiler $<$: Renesas/SH2A_FPU/port.c Renesas/SH2A_FPU/portasm.src> + + # Texas Instruments MSP430 port for Rowley CrossWorks $<$: Rowley/MSP430F449/port.c Rowley/MSP430F449/portext.asm> + + # ARMv7-A Cortex-A9 port for ARM RVDS / armcc $<$: RVDS/ARM_CA9/port.c RVDS/ARM_CA9/portASM.s> + + # ARMv6-M port for ARM RVDS / armcc $<$: RVDS/ARM_CM0/port.c> + + # ARMv7-M port for ARM RVDS / armcc $<$: RVDS/ARM_CM3/port.c> + + # ARMv7E-M ports for ARM RVDS / armcc $<$: RVDS/ARM_CM4_MPU/port.c> + $<$: RVDS/ARM_CM4F/port.c> + $<$: RVDS/ARM_CM7/r0p1/port.c> + + # ARMv4T / ARM7TDMI LPC21XX port for ARM RVDS / armcc $<$: RVDS/ARM7_LPC21xx/port.c RVDS/ARM7_LPC21xx/portASM.s> + + # Cygnal c8051 port for SDCC (Small Device C Compiler) $<$: SDCC/Cygnal/port.c> + + # Infineon (formerly Fujitsu, Spansion, Cypress) MB9x ports for Softune C Compiler $<$: Softune/MB91460/__STD_LIB_sbrk.c Softune/MB91460/port.c> + $<$: Softune/MB96340/__STD_LIB_sbrk.c 
Softune/MB96340/port.c> + + # ARMv7E-M (Cortex-M4F) port for TASKING VX-toolset for ARM $<$: Tasking/ARM_CM4F/port.c Tasking/ARM_CM4F/port_asm.asm> + + # Port for C-SKY T-HEAD CK802 $<$: ThirdParty/CDK/T-HEAD_CK802/port.c ThirdParty/CDK/T-HEAD_CK802/portasm.S> + + # Tensilica Xtensa port for XCC $<$: ThirdParty/XCC/Xtensa/port.c ThirdParty/XCC/Xtensa/portasm.S @@ -387,176 +652,334 @@ add_library(freertos_kernel_port STATIC ThirdParty/XCC/Xtensa/xtensa_intr.c ThirdParty/XCC/Xtensa/xtensa_overlay_os_hook.c ThirdParty/XCC/Xtensa/xtensa_vectors.S> + + # Microchip PIC18 port for WIZ-C $<$: WizC/PIC18/port.c WizC/PIC18/Drivers/Tick/isrTick.c WizC/PIC18/Drivers/Tick/Tick.c> ) -if( - FREERTOS_PORT STREQUAL "GCC_ARM_CM23_NTZ_NONSECURE" OR - FREERTOS_PORT STREQUAL "GCC_ARM_CM23_NONSECURE" OR - FREERTOS_PORT STREQUAL "GCC_ARM_CM33_NTZ_NONSECURE" OR - FREERTOS_PORT STREQUAL "GCC_ARM_CM3_MPU" OR - FREERTOS_PORT STREQUAL "GCC_ARM_CM4_MPU" OR - FREERTOS_PORT STREQUAL "GCC_ARM_CM33_NONSECURE" OR - FREERTOS_PORT STREQUAL "IAR_ARM_CM23_NTZ_NONSECURE" OR - FREERTOS_PORT STREQUAL "IAR_ARM_CM23_NONSECURE" OR - FREERTOS_PORT STREQUAL "IAR_ARM_CM33_NTZ_NONSECURE" OR +if( FREERTOS_PORT MATCHES "GCC_ARM_CM(3|4)_MPU" OR FREERTOS_PORT STREQUAL "IAR_ARM_CM4F_MPU" OR - FREERTOS_PORT STREQUAL "IAR_ARM_CM33_NONSECURE" OR - FREERTOS_PORT STREQUAL "RVDS_ARM_CM4_MPU" + FREERTOS_PORT STREQUAL "RVDS_ARM_CM4_MPU" OR + FREERTOS_PORT MATCHES "GCC_ARM_CM(23|33|55|85)_NTZ_NONSECURE" OR + FREERTOS_PORT MATCHES "GCC_ARM_CM(23|33|55|85)_NONSECURE" OR + FREERTOS_PORT MATCHES "GCC_ARM_CM(33|55|85)_TFM" OR + FREERTOS_PORT MATCHES "IAR_ARM_CM(23|33|55|85)_NTZ_NONSECURE" OR + FREERTOS_PORT MATCHES "IAR_ARM_CM(23|33|55|85)_NONSECURE" ) target_sources(freertos_kernel_port PRIVATE Common/mpu_wrappers.c) endif() target_include_directories(freertos_kernel_port PUBLIC + # 16-Bit DOS ports for BCC $<$: ${CMAKE_CURRENT_LIST_DIR}/BCC/16BitDOS/common ${CMAKE_CURRENT_LIST_DIR}/BCC/16BitDOS/Flsh186> + $<$: ${CMAKE_CURRENT_LIST_DIR}/BCC/16BitDOS/common ${CMAKE_CURRENT_LIST_DIR}/BCC/16BitDOS/PC> + + # ARMv7-M port for Texas Instruments Code Composer Studio $<$:${CMAKE_CURRENT_LIST_DIR}/CCS/ARM_CM3> + + # ARMv7E-M port for Texas Instruments Code Composer Studio $<$:${CMAKE_CURRENT_LIST_DIR}/CCS/ARM_CM4F> + + # ARMv7-R port for Texas Instruments Code Composer Studio $<$:${CMAKE_CURRENT_LIST_DIR}/CCS/ARM_Cortex-R4> + + # Texas Instruments MSP430 port for Texas Instruments Code Composer Studio $<$:${CMAKE_CURRENT_LIST_DIR}/CCS/MSP430X> + + # NXP (formerly Motorola, Freescale) Cold Fire and 68HCS12 ports for Code Warrior $<$:${CMAKE_CURRENT_LIST_DIR}/CodeWarrior/ColdFire_V1> $<$:${CMAKE_CURRENT_LIST_DIR}/CodeWarrior/ColdFire_V2> $<$:${CMAKE_CURRENT_LIST_DIR}/CodeWarrior/HCS12> + + # ARMv7-A port for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CA9> + + # ARMv8-A ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CA53_64_BIT> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CA53_64_BIT_SRE> + + # ARMv6-M port for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM0> + + # ARMv6-M / Cortex-M0 Raspberry PI RP2040 port for GCC + $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/RP2040/include> + + # ARMv7-M ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM3> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM3_MPU> + + # ARMv7E-M ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM4_MPU> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM4F> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM7/r0p1> + + # ARMv8-M ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM23/non_secure> 
$<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM23/secure> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM23_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM33/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM33/secure> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM33_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM33_NTZ/non_secure> + + # ARMv8.1-M ports for GCC + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM55/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM55/secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM55_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM85_NTZ/non_secure> + + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM85/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM85/secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM85_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM85_NTZ/non_secure> + + # ARMv7-R ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CR5> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CRx_No_GIC> + + # ARMv4T ARM7TDMI ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM7_AT91FR40008> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM7_AT91SAM7S> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM7_LPC2000> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM7_LPC23xx> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/STR75x> + + # Microchip (formerly Ateml) AVR8 ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ATMega323> + $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/ATmega> + $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/Partner-Supported-Ports/GCC/AVR_AVRDx> + $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/Partner-Supported-Ports/GCC/AVR_Mega0> + + # Microchip (formerly Ateml) AVR32 port for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/AVR32_UC3> + + # NXP (formerly Motorola, Freescale) Cold Fire and 68HCS12 ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ColdFire_V2> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/HCS12> + + # Cortus APS3 soft core port for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/CORTUS_APS3> + + # Renesas (formerly Hitach) H8S port for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/H8S2329> - $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/HCS12> + + # x86 / IA32 flat memory model port for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/IA32_flat> + + # Intel (formerly Altera) NIOS II soft core port for GCC + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/NiosII> + + # Texas Instruments MSP430 port for GCC + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/MSP430F449> + + # Xilinx MicroBlaze soft core ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/MicroBlaze> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/MicroBlazeV8> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/MicroBlazeV9> - $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/MSP430F449> - $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/NiosII> + + # Xilinx PCC4XX soft core ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/PPC405_Xilinx> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/PPC440_Xilinx> + + # RISC-V architecture ports for GCC $<$: ${CMAKE_CURRENT_LIST_DIR}/GCC/RISC-V ${CMAKE_CURRENT_LIST_DIR}/GCC/RISC-V/chip_specific_extensions/RISCV_MTIME_CLINT_no_extensions> + $<$: ${CMAKE_CURRENT_LIST_DIR}/GCC/RISC-V ${CMAKE_CURRENT_LIST_DIR}/GCC/RISC-V/chip_specific_extensions/Pulpino_Vega_RV32M1RM> + + # Renesas RL78 port for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/RL78> + + # Renesas RX architecture ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/RX100> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/RX200> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/RX600> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/RX600v2> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/RX700v3_DPFPU> - $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/STR75x> + + # Infineon TriCore 1782 port for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/TriCore_1782> + + # Synopsys ARC architecture 
ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/ARC_EM_HS> $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/ARC_v1> - $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/ATmega> + + # Posix Simulator port for GCC $<$: ${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/Posix ${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/Posix/utils> - $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/RP2040/include> + + # Xtensa LX / Espressif ESP32 port for GCC $<$: ${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/Xtensa_ESP32 ${CMAKE_CURRENT_LIST_DIR}/ThirdParty/GCC/Xtensa_ESP32/include> - $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/Partner-Supported-Ports/GCC/AVR_AVRDx> - $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/Partner-Supported-Ports/GCC/AVR_Mega0> + + # Renesas (formerly NEC) 78K port for IAR EW78K $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/78K0R> + + # ARMv7-A ports for IAR EWARM $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CA5_No_GIC> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CA9> + + # ARMv6-M port for IAR EWARM $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM0> + + # ARMv7-M port for IAR EWARM $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM3> + + # ARMv7E-M ports for IAR EWARM $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM4F> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM4F_MPU> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM7/r0p1> + + # ARMv8-M Ports for IAR EWARM $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM23/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM23/secure> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM23_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM33/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM33/secure> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM33_NTZ/non_secure> + + # ARMv8.1-M ports for IAR EWARM + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM55/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM55/secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM55_NTZ/non_secure> + + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM85/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM85/secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM85_NTZ/non_secure> + + # ARMv7-R Ports for IAR EWARM $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CRx_No_GIC> - $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ATMega323> + + # ARMv4T ARM7TDMI ports for IAR Embedded Workbench for ARM + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/STR71x> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/STR75x> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/LPC2000> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/AtmelSAM7S64> + + # ARMv5TE ARM926 ports for IAR Embedded Workbench for ARM + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/STR91x> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/AtmelSAM9XE> + + # Microchip (formerly Atmel) AVR8 ports for IAR EWAVR + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ATMega323> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/AVR_AVRDx> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/AVR_Mega0> + + # Microchip (formerly Atmel) AVR32 port for IAR Embedded Workbench for AVR32 $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/AVR32_UC3> - $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/LPC2000> + + # Texas Instruments MSP430 ports for IAR Embedded Workbench for MSP430 $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/MSP430> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/MSP430X> + + # RISC-V architecture port for IAR Embedded Workbench for RISC-V $<$: ${CMAKE_CURRENT_LIST_DIR}/IAR/RISC-V ${CMAKE_CURRENT_LIST_DIR}/IAR/RISC-V/chip_specific_extensions/RV32I_CLINT_no_extensions> + + # Renesas RL78 port for IAR EWRL78 $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/RL78> + + # Renesas RX architecture ports for IAR EWRX $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/RX100> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/RX600> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/RX700v3_DPFPU> 
$<$:${CMAKE_CURRENT_LIST_DIR}/IAR/RXv2> - $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/STR71x> - $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/STR75x> - $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/STR91x> + + # Renesas (formerly NEC) V850ES port for IAR EWV850 $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/V850ES> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/V850ES> + + # ARM Cortex-M4F port for the MikroElektronika MikroC compiler $<$:${CMAKE_CURRENT_LIST_DIR}/MikroC/ARM_CM4F> + + # Microchip PIC18 8-bit MCU port for MPLAB XC8 $<$:${CMAKE_CURRENT_LIST_DIR}/MPLAB/PIC18F> + + # Microchip PIC24 16-bit MCU port for MPLAB XC16 $<$:${CMAKE_CURRENT_LIST_DIR}/MPLAB/PIC24_dsPIC> + + # Microchip MIPS 32-Bit MCU ports for MPLAB XC32 $<$:${CMAKE_CURRENT_LIST_DIR}/MPLAB/PIC32MEC14xx> $<$:${CMAKE_CURRENT_LIST_DIR}/MPLAB/PIC32MX> $<$:${CMAKE_CURRENT_LIST_DIR}/MPLAB/PIC32MZ> + + # Windows Simulator for Microsoft Visual C Compiler and MinGW GCC $<$:${CMAKE_CURRENT_LIST_DIR}/MSVC-MingW> + + # 16 bit DOS ports for Open Watcom $<$: ${CMAKE_CURRENT_LIST_DIR}/oWatcom/16BitDOS/common ${CMAKE_CURRENT_LIST_DIR}/oWatcom/16BitDOS/Flsh186> $<$: ${CMAKE_CURRENT_LIST_DIR}/oWatcom/16BitDOS/common ${CMAKE_CURRENT_LIST_DIR}/oWatcom/16BitDOS/PC> + $<$:${CMAKE_CURRENT_LIST_DIR}/Paradigm/Tern_EE/large_untested> $<$:${CMAKE_CURRENT_LIST_DIR}/Paradigm/Tern_EE/small> + + # Renesas RX mcu ports for Renesas CC-RX $<$:${CMAKE_CURRENT_LIST_DIR}/Renesas/RX100> $<$:${CMAKE_CURRENT_LIST_DIR}/Renesas/RX200> $<$:${CMAKE_CURRENT_LIST_DIR}/Renesas/RX600> $<$:${CMAKE_CURRENT_LIST_DIR}/Renesas/RX600v2> $<$:${CMAKE_CURRENT_LIST_DIR}/Renesas/RX700v3_DPFPU> + + # Renesas (formerly Hitach) SHA2 SuperH port for the Renesas SH C Compiler $<$:${CMAKE_CURRENT_LIST_DIR}/Renesas/SH2A_FPU> + + # Texas Instruments MSP430 port for Rowley CrossWorks $<$:${CMAKE_CURRENT_LIST_DIR}/Rowley/MSP430F449> + + # ARMv7-A Cortex-A9 port for ARM RVDS / armcc $<$:${CMAKE_CURRENT_LIST_DIR}/RVDS/ARM_CA9> + + # ARMv6-M port for ARM RVDS / armcc $<$:${CMAKE_CURRENT_LIST_DIR}/RVDS/ARM_CM0> + + # ARMv7-M port for ARM RVDS / armcc $<$:${CMAKE_CURRENT_LIST_DIR}/RVDS/ARM_CM3> + + # ARMv7E-M ports for ARM RVDS / armcc $<$:${CMAKE_CURRENT_LIST_DIR}/RVDS/ARM_CM4_MPU> $<$:${CMAKE_CURRENT_LIST_DIR}/RVDS/ARM_CM4F> $<$:${CMAKE_CURRENT_LIST_DIR}/RVDS/ARM_CM7/r0p1> + + # ARMv4T / ARM7TDMI LPC21XX port for ARM RVDS / armcc $<$:${CMAKE_CURRENT_LIST_DIR}/RVDS/ARM7_LPC21xx> + + # Cygnal c8051 port for SDCC (Small Device C Compiler) $<$:${CMAKE_CURRENT_LIST_DIR}/SDCC/Cygnal> + + # Infineon (formerly Fujitsu, Spansion, Cypress) MB9x ports for Softune C Compiler $<$:${CMAKE_CURRENT_LIST_DIR}/Softune/MB91460> $<$:${CMAKE_CURRENT_LIST_DIR}/Softune/MB96340> + + # ARMv7E-M (Cortex-M4F) port for TASKING VX-toolset for ARM $<$:${CMAKE_CURRENT_LIST_DIR}/Tasking/ARM_CM4F> + + # Port for C-SKY T-HEAD CK802 $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/CDK/T-HEAD_CK802> + + # Tensilica Xtensa port for XCC $<$:${CMAKE_CURRENT_LIST_DIR}/ThirdParty/XCC/Xtensa> + + # Microchip PIC18 port for WIZ-C $<$:${CMAKE_CURRENT_LIST_DIR}/WizC/PIC18> ) From a17b7bb081417708c45ac3689fe7ea843ae99d73 Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Wed, 7 Sep 2022 14:47:14 +0530 Subject: [PATCH 091/164] Use highest numbered MPU regions for kernel ARMv7-M allows overlapping MPU regions. When 2 MPU regions overlap, the MPU configuration of the higher numbered MPU region is applied. For example, if a memory area is covered by 2 MPU regions 0 and 1, the memory permissions for MPU region 1 are applied. 
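To make the overlap rule concrete, the sketch below configures two regions over the same RAM block. It reuses the register and helper names from the ARMv7-M MPU ports purely for illustration; the address, size and region numbers are invented:

```c
/* Illustration only: place MPU regions 0 and 7 over the same 4KB of RAM.
 * portMPU_REGION_BASE_ADDRESS_REG, portMPU_REGION_ATTRIBUTE_REG and
 * prvGetMPURegionSizeSetting() are the names used inside the ARMv7-M ports. */
static void prvSketchOverlappingRegions( void )
{
    /* Region 0: privileged read/write only. */
    portMPU_REGION_BASE_ADDRESS_REG = 0x20000000UL | portMPU_REGION_VALID | 0UL;
    portMPU_REGION_ATTRIBUTE_REG    = ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
                                      ( prvGetMPURegionSizeSetting( 0x1000UL ) ) |
                                      ( portMPU_REGION_ENABLE );

    /* Region 7: read/write for everyone, same address range. Because 7 > 0,
     * these attributes apply wherever the two regions overlap, so the
     * privileged-only protection configured by region 0 is effectively
     * overridden. */
    portMPU_REGION_BASE_ADDRESS_REG = 0x20000000UL | portMPU_REGION_VALID | 7UL;
    portMPU_REGION_ATTRIBUTE_REG    = ( portMPU_REGION_READ_WRITE ) |
                                      ( prvGetMPURegionSizeSetting( 0x1000UL ) ) |
                                      ( portMPU_REGION_ENABLE );
}
```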
We use 5 MPU regions for kernel code and kernel data protections and leave the remaining for the application writer. We were using lowest numbered MPU regions (0-4) for kernel protections and leaving the remaining for the application writer. The application writer could configure those higher numbered MPU regions to override kernel protections. This commit changes the code to use highest numbered MPU regions for kernel protections and leave the remaining for the application writer. This ensures that the application writer cannot override kernel protections. We thank the SecLab team at Northeastern University for reporting this issue. Signed-off-by: Gaurav Aggarwal --- portable/GCC/ARM_CM3_MPU/port.c | 27 +++++-------------- portable/GCC/ARM_CM3_MPU/portmacro.h | 14 +++++----- portable/GCC/ARM_CM4_MPU/port.c | 39 +++++++++------------------ portable/GCC/ARM_CM4_MPU/portmacro.h | 21 +++++++-------- portable/IAR/ARM_CM4F_MPU/port.c | 31 +++++++-------------- portable/IAR/ARM_CM4F_MPU/portmacro.h | 21 +++++++-------- portable/RVDS/ARM_CM4_MPU/port.c | 39 +++++++++------------------ portable/RVDS/ARM_CM4_MPU/portmacro.h | 21 +++++++-------- 8 files changed, 79 insertions(+), 134 deletions(-) diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index e7bec2ecc16..c3fa9989d6c 100644 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -756,7 +756,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress = ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */ ( portMPU_REGION_VALID ) | - ( portSTACK_REGION ); + ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | @@ -764,23 +764,10 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); - /* Re-instate the privileged only RAM region as xRegion[ 0 ] will have - * just removed the privileged only parameters. */ - xMPUSettings->xRegion[ 1 ].ulRegionBaseAddress = - ( ( uint32_t ) __privileged_data_start__ ) | /* Base address. */ - ( portMPU_REGION_VALID ) | - ( portSTACK_REGION + 1 ); - - xMPUSettings->xRegion[ 1 ].ulRegionAttribute = - ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | - ( portMPU_REGION_CACHEABLE_BUFFERABLE ) | - prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_data_end__ - ( uint32_t ) __privileged_data_start__ ) | - ( portMPU_REGION_ENABLE ); - - /* Invalidate all other regions. */ - for( ul = 2; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) + /* Invalidate user configurable regions. */ + for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { - xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( portSTACK_REGION + ul ) | portMPU_REGION_VALID; + xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; } } @@ -807,7 +794,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, lIndex = 0; - for( ul = 1; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) + for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { if( ( xRegions[ lIndex ] ).ulLengthInBytes > 0UL ) { @@ -817,7 +804,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) | ( portMPU_REGION_VALID ) | - ( portSTACK_REGION + ul ); /* Region number. 
*/ + ( ul - 1UL ); /* Region number. */ xMPUSettings->xRegion[ ul ].ulRegionAttribute = ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | @@ -827,7 +814,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, else { /* Invalidate the region. */ - xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( portSTACK_REGION + ul ) | portMPU_REGION_VALID; + xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; } diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index 406a4843aba..c2a35490b7d 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -82,15 +82,15 @@ #define portMPU_REGION_CACHEABLE_BUFFERABLE ( 0x07UL << 16UL ) #define portMPU_REGION_EXECUTE_NEVER ( 0x01UL << 28UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 0UL ) - #define portPRIVILEGED_FLASH_REGION ( 1UL ) - #define portPRIVILEGED_RAM_REGION ( 2UL ) #define portGENERAL_PERIPHERALS_REGION ( 3UL ) #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( 7UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 5UL ) + #define portPRIVILEGED_FLASH_REGION ( 6UL ) + #define portPRIVILEGED_RAM_REGION ( 7UL ) + #define portFIRST_CONFIGURABLE_REGION ( 0UL ) + #define portLAST_CONFIGURABLE_REGION ( 2UL ) #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + #define portTOTAL_NUM_REGIONS_IN_TCB ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ #define portSWITCH_TO_USER_MODE() __asm volatile ( " mrs r0, control \n orr r0, #1 \n msr control, r0 " ::: "r0", "memory" ) @@ -103,7 +103,7 @@ /* Plus 1 to create space for the stack region. */ typedef struct MPU_SETTINGS { - xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS ]; + xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; } xMPU_SETTINGS; /* Architecture specifics. */ diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index ecffbfe0011..a8609251d1d 100644 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -81,7 +81,7 @@ #define portMPU_REGION_BASE_ADDRESS_REG ( *( ( volatile uint32_t * ) 0xe000ed9C ) ) #define portMPU_REGION_ATTRIBUTE_REG ( *( ( volatile uint32_t * ) 0xe000edA0 ) ) #define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) -#define portEXPECTED_MPU_TYPE_VALUE ( portTOTAL_NUM_REGIONS << 8UL ) +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) #define portMPU_ENABLE ( 0x01UL ) #define portMPU_BACKGROUND_ENABLE ( 1UL << 2UL ) #define portPRIVILEGED_EXECUTION_START_ADDRESS ( 0UL ) @@ -380,12 +380,12 @@ static void prvRestoreContextOfFirstTask( void ) " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ " \n" - #if ( portTOTAL_NUM_REGIONS == 16 ) + #if ( configTOTAL_MPU_REGIONS == 16 ) " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. 
*/ - #endif /* portTOTAL_NUM_REGIONS == 16. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ " \n" " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ @@ -633,12 +633,12 @@ void xPortPendSVHandler( void ) " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ " \n" - #if ( portTOTAL_NUM_REGIONS == 16 ) + #if ( configTOTAL_MPU_REGIONS == 16 ) " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* portTOTAL_NUM_REGIONS == 16. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ " \n" " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ @@ -736,7 +736,7 @@ static void prvSetupMPU( void ) #endif /* if defined( __ARMCC_VERSION ) */ /* The only permitted number of regions are 8 or 16. */ - configASSERT( ( portTOTAL_NUM_REGIONS == 8 ) || ( portTOTAL_NUM_REGIONS == 16 ) ); + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); @@ -879,7 +879,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress = ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */ ( portMPU_REGION_VALID ) | - ( portSTACK_REGION ); + ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | @@ -887,23 +887,10 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); - /* Re-instate the privileged only RAM region as xRegion[ 0 ] will have - * just removed the privileged only parameters. */ - xMPUSettings->xRegion[ 1 ].ulRegionBaseAddress = - ( ( uint32_t ) __privileged_data_start__ ) | /* Base address. */ - ( portMPU_REGION_VALID ) | - ( portSTACK_REGION + 1 ); - - xMPUSettings->xRegion[ 1 ].ulRegionAttribute = - ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | - ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | - prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_data_end__ - ( uint32_t ) __privileged_data_start__ ) | - ( portMPU_REGION_ENABLE ); - - /* Invalidate all other regions. */ - for( ul = 2; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) + /* Invalidate user configurable regions. 
*/ + for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { - xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( portSTACK_REGION + ul ) | portMPU_REGION_VALID; + xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; } } @@ -930,7 +917,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, lIndex = 0; - for( ul = 1; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) + for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { if( ( xRegions[ lIndex ] ).ulLengthInBytes > 0UL ) { @@ -940,7 +927,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) | ( portMPU_REGION_VALID ) | - ( portSTACK_REGION + ul ); /* Region number. */ + ( ul - 1UL ); /* Region number. */ xMPUSettings->xRegion[ ul ].ulRegionAttribute = ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | @@ -950,7 +937,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, else { /* Invalidate the region. */ - xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( portSTACK_REGION + ul ) | portMPU_REGION_VALID; + xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; } diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index cef1b04be65..31d198d111f 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -173,15 +173,15 @@ typedef unsigned long UBaseType_t; #define configTEX_S_C_B_SRAM ( 0x07UL ) #endif -#define portUNPRIVILEGED_FLASH_REGION ( 0UL ) -#define portPRIVILEGED_FLASH_REGION ( 1UL ) -#define portPRIVILEGED_RAM_REGION ( 2UL ) -#define portGENERAL_PERIPHERALS_REGION ( 3UL ) -#define portSTACK_REGION ( 4UL ) -#define portFIRST_CONFIGURABLE_REGION ( 5UL ) -#define portTOTAL_NUM_REGIONS ( configTOTAL_MPU_REGIONS ) -#define portNUM_CONFIGURABLE_REGIONS ( portTOTAL_NUM_REGIONS - portFIRST_CONFIGURABLE_REGION ) -#define portLAST_CONFIGURABLE_REGION ( portTOTAL_NUM_REGIONS - 1 ) +#define portGENERAL_PERIPHERALS_REGION ( configTOTAL_MPU_REGIONS - 5UL ) +#define portSTACK_REGION ( configTOTAL_MPU_REGIONS - 4UL ) +#define portUNPRIVILEGED_FLASH_REGION ( configTOTAL_MPU_REGIONS - 3UL ) +#define portPRIVILEGED_FLASH_REGION ( configTOTAL_MPU_REGIONS - 2UL ) +#define portPRIVILEGED_RAM_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portFIRST_CONFIGURABLE_REGION ( 0UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 6UL ) +#define portNUM_CONFIGURABLE_REGIONS ( configTOTAL_MPU_REGIONS - 5UL ) +#define portTOTAL_NUM_REGIONS_IN_TCB ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus 1 to create space for the stack region. */ #define portSWITCH_TO_USER_MODE() __asm volatile ( " mrs r0, control \n orr r0, #1 \n msr control, r0 " ::: "r0", "memory" ) @@ -191,10 +191,9 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; -/* Plus 1 to create space for the stack region. */ typedef struct MPU_SETTINGS { - xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS ]; + xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; } xMPU_SETTINGS; /* Architecture specifics. 
*/ diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index c9a254131d4..27db7d8e605 100644 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -82,7 +82,7 @@ #define portMPU_REGION_BASE_ADDRESS_REG ( *( ( volatile uint32_t * ) 0xe000ed9C ) ) #define portMPU_REGION_ATTRIBUTE_REG ( *( ( volatile uint32_t * ) 0xe000edA0 ) ) #define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) -#define portEXPECTED_MPU_TYPE_VALUE ( portTOTAL_NUM_REGIONS << 8UL ) +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) #define portMPU_ENABLE ( 0x01UL ) #define portMPU_BACKGROUND_ENABLE ( 1UL << 2UL ) #define portPRIVILEGED_EXECUTION_START_ADDRESS ( 0UL ) @@ -555,7 +555,7 @@ static void prvSetupMPU( void ) extern uint32_t __privileged_data_end__[]; /* The only permitted number of regions are 8 or 16. */ - configASSERT( ( portTOTAL_NUM_REGIONS == 8 ) || ( portTOTAL_NUM_REGIONS == 16 ) ); + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); @@ -656,7 +656,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress = ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */ ( portMPU_REGION_VALID ) | - ( portSTACK_REGION ); + ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | @@ -664,23 +664,10 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); - /* Re-instate the privileged only RAM region as xRegion[ 0 ] will have - * just removed the privileged only parameters. */ - xMPUSettings->xRegion[ 1 ].ulRegionBaseAddress = - ( ( uint32_t ) __privileged_data_start__ ) | /* Base address. */ - ( portMPU_REGION_VALID ) | - ( portSTACK_REGION + 1 ); - - xMPUSettings->xRegion[ 1 ].ulRegionAttribute = - ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | - ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | - prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_data_end__ - ( uint32_t ) __privileged_data_start__ ) | - ( portMPU_REGION_ENABLE ); - - /* Invalidate all other regions. */ - for( ul = 2; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) + /* Invalidate user configurable regions. */ + for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { - xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( portSTACK_REGION + ul ) | portMPU_REGION_VALID; + xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; } } @@ -707,7 +694,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, lIndex = 0; - for( ul = 1; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) + for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { if( ( xRegions[ lIndex ] ).ulLengthInBytes > 0UL ) { @@ -717,7 +704,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) | ( portMPU_REGION_VALID ) | - ( portSTACK_REGION + ul ); /* Region number. */ + ( ul - 1UL ); /* Region number. 
*/ xMPUSettings->xRegion[ ul ].ulRegionAttribute = ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | @@ -727,7 +714,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, else { /* Invalidate the region. */ - xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( portSTACK_REGION + ul ) | portMPU_REGION_VALID; + xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; } diff --git a/portable/IAR/ARM_CM4F_MPU/portmacro.h b/portable/IAR/ARM_CM4F_MPU/portmacro.h index 659fb13ea18..896c9394cf7 100644 --- a/portable/IAR/ARM_CM4F_MPU/portmacro.h +++ b/portable/IAR/ARM_CM4F_MPU/portmacro.h @@ -175,15 +175,15 @@ typedef unsigned long UBaseType_t; #define configTEX_S_C_B_SRAM ( 0x07UL ) #endif -#define portUNPRIVILEGED_FLASH_REGION ( 0UL ) -#define portPRIVILEGED_FLASH_REGION ( 1UL ) -#define portPRIVILEGED_RAM_REGION ( 2UL ) -#define portGENERAL_PERIPHERALS_REGION ( 3UL ) -#define portSTACK_REGION ( 4UL ) -#define portFIRST_CONFIGURABLE_REGION ( 5UL ) -#define portTOTAL_NUM_REGIONS ( configTOTAL_MPU_REGIONS ) -#define portNUM_CONFIGURABLE_REGIONS ( portTOTAL_NUM_REGIONS - portFIRST_CONFIGURABLE_REGION ) -#define portLAST_CONFIGURABLE_REGION ( portTOTAL_NUM_REGIONS - 1UL ) +#define portGENERAL_PERIPHERALS_REGION ( configTOTAL_MPU_REGIONS - 5UL ) +#define portSTACK_REGION ( configTOTAL_MPU_REGIONS - 4UL ) +#define portUNPRIVILEGED_FLASH_REGION ( configTOTAL_MPU_REGIONS - 3UL ) +#define portPRIVILEGED_FLASH_REGION ( configTOTAL_MPU_REGIONS - 2UL ) +#define portPRIVILEGED_RAM_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portFIRST_CONFIGURABLE_REGION ( 0UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 6UL ) +#define portNUM_CONFIGURABLE_REGIONS ( configTOTAL_MPU_REGIONS - 5UL ) +#define portTOTAL_NUM_REGIONS_IN_TCB ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus 1 to create space for the stack region. */ #define portSWITCH_TO_USER_MODE() __asm volatile ( " mrs r0, control \n orr r0, r0, #1 \n msr control, r0 " ::: "r0", "memory" ) @@ -193,10 +193,9 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; -/* Plus 1 to create space for the stack region. */ typedef struct MPU_SETTINGS { - xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS ]; + xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; } xMPU_SETTINGS; /* Architecture specifics. */ diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index 46fe89d43b2..9f58dbdfc1a 100644 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -70,7 +70,7 @@ #define portMPU_REGION_BASE_ADDRESS_REG ( *( ( volatile uint32_t * ) 0xe000ed9C ) ) #define portMPU_REGION_ATTRIBUTE_REG ( *( ( volatile uint32_t * ) 0xe000edA0 ) ) #define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) -#define portEXPECTED_MPU_TYPE_VALUE ( portTOTAL_NUM_REGIONS << 8UL ) +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) #define portMPU_ENABLE ( 0x01UL ) #define portMPU_BACKGROUND_ENABLE ( 1UL << 2UL ) #define portPRIVILEGED_EXECUTION_START_ADDRESS ( 0UL ) @@ -373,12 +373,12 @@ __asm void prvRestoreContextOfFirstTask( void ) ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. 
*/ - #if ( portTOTAL_NUM_REGIONS == 16 ) + #if ( configTOTAL_MPU_REGIONS == 16 ) ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* portTOTAL_NUM_REGIONS == 16. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ ldr r2, =0xe000ed94 /* MPU_CTRL register. */ ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ @@ -637,12 +637,12 @@ __asm void xPortPendSVHandler( void ) ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ - #if ( portTOTAL_NUM_REGIONS == 16 ) + #if ( configTOTAL_MPU_REGIONS == 16 ) ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* portTOTAL_NUM_REGIONS == 16. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ ldr r2, =0xe000ed94 /* MPU_CTRL register. */ ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ @@ -737,7 +737,7 @@ static void prvSetupMPU( void ) extern uint32_t __privileged_data_end__; /* The only permitted number of regions are 8 or 16. */ - configASSERT( ( portTOTAL_NUM_REGIONS == 8 ) || ( portTOTAL_NUM_REGIONS == 16 ) ); + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); @@ -868,7 +868,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress = ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */ ( portMPU_REGION_VALID ) | - ( portSTACK_REGION ); + ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | @@ -876,23 +876,10 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); - /* Re-instate the privileged only RAM region as xRegion[ 0 ] will have - * just removed the privileged only parameters. */ - xMPUSettings->xRegion[ 1 ].ulRegionBaseAddress = - ( ( uint32_t ) __privileged_data_start__ ) | /* Base address. */ - ( portMPU_REGION_VALID ) | - ( portSTACK_REGION + 1 ); - - xMPUSettings->xRegion[ 1 ].ulRegionAttribute = - ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | - ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | - prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_data_end__ - ( uint32_t ) __privileged_data_start__ ) | - ( portMPU_REGION_ENABLE ); - - /* Invalidate all other regions. */ - for( ul = 2; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) + /* Invalidate user configurable regions. 
*/ + for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { - xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( portSTACK_REGION + ul ) | portMPU_REGION_VALID; + xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; } } @@ -919,7 +906,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, lIndex = 0; - for( ul = 1; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) + for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { if( ( xRegions[ lIndex ] ).ulLengthInBytes > 0UL ) { @@ -929,7 +916,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) | ( portMPU_REGION_VALID ) | - ( portSTACK_REGION + ul ); /* Region number. */ + ( ul - 1UL ); /* Region number. */ xMPUSettings->xRegion[ ul ].ulRegionAttribute = ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | @@ -939,7 +926,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, else { /* Invalidate the region. */ - xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( portSTACK_REGION + ul ) | portMPU_REGION_VALID; + xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; } diff --git a/portable/RVDS/ARM_CM4_MPU/portmacro.h b/portable/RVDS/ARM_CM4_MPU/portmacro.h index 7c3d7b5624a..502ce408599 100644 --- a/portable/RVDS/ARM_CM4_MPU/portmacro.h +++ b/portable/RVDS/ARM_CM4_MPU/portmacro.h @@ -172,15 +172,15 @@ typedef unsigned long UBaseType_t; #define configTEX_S_C_B_SRAM ( 0x07UL ) #endif -#define portUNPRIVILEGED_FLASH_REGION ( 0UL ) -#define portPRIVILEGED_FLASH_REGION ( 1UL ) -#define portPRIVILEGED_RAM_REGION ( 2UL ) -#define portGENERAL_PERIPHERALS_REGION ( 3UL ) -#define portSTACK_REGION ( 4UL ) -#define portFIRST_CONFIGURABLE_REGION ( 5UL ) -#define portTOTAL_NUM_REGIONS ( configTOTAL_MPU_REGIONS ) -#define portNUM_CONFIGURABLE_REGIONS ( portTOTAL_NUM_REGIONS - portFIRST_CONFIGURABLE_REGION ) -#define portLAST_CONFIGURABLE_REGION ( portTOTAL_NUM_REGIONS - 1 ) +#define portGENERAL_PERIPHERALS_REGION ( configTOTAL_MPU_REGIONS - 5UL ) +#define portSTACK_REGION ( configTOTAL_MPU_REGIONS - 4UL ) +#define portUNPRIVILEGED_FLASH_REGION ( configTOTAL_MPU_REGIONS - 3UL ) +#define portPRIVILEGED_FLASH_REGION ( configTOTAL_MPU_REGIONS - 2UL ) +#define portPRIVILEGED_RAM_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portFIRST_CONFIGURABLE_REGION ( 0UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 6UL ) +#define portNUM_CONFIGURABLE_REGIONS ( configTOTAL_MPU_REGIONS - 5UL ) +#define portTOTAL_NUM_REGIONS_IN_TCB ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus 1 to create space for the stack region. */ void vPortSwitchToUserMode( void ); #define portSWITCH_TO_USER_MODE() vPortSwitchToUserMode() @@ -191,10 +191,9 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; -/* Plus 1 to create space for the stack region. */ typedef struct MPU_SETTINGS { - xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS ]; + xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; } xMPU_SETTINGS; /* Architecture specifics. */ From b18338877414a8f5226b97468f5db3a023a1b5de Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Wed, 7 Sep 2022 14:50:30 +0530 Subject: [PATCH 092/164] Make RAM regions non-executable This commit makes the privileged RAM and stack regions non-executable. 
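For readers less familiar with the ARMv7-M MPU encoding, the change amounts to OR-ing an execute-never flag into the attribute word that each port writes to the MPU Region Attribute and Size Register (RASR) for its data regions (the privileged data region, the SRAM background region and each task stack). The snippet below is an illustrative sketch only, not code taken from this patch: the macro names and the 32 KB region size are invented for the example, and the bit positions assume the standard ARMv7-M RASR layout (XN at bit 28, AP at bits 26:24, SIZE at bits 5:1, ENABLE at bit 0).

    #include <stdint.h>

    /* Illustrative RASR field encodings (assumed ARMv7-M layout, not the port's own macros). */
    #define MPU_RASR_ENABLE            ( 1UL << 0 )
    #define MPU_RASR_SIZE( ulBits )    ( ( ( uint32_t ) ( ( ulBits ) - 1UL ) ) << 1 ) /* Region covers 2^ulBits bytes. */
    #define MPU_RASR_AP_READ_WRITE     ( 0x3UL << 24 )                                /* Full read/write access. */
    #define MPU_RASR_XN                ( 1UL << 28 )                                  /* Instruction fetches from the region fault. */

    /* Attribute word for a hypothetical 32 KB read/write data region that must never be executed. */
    static uint32_t prvExampleDataRegionAttributes( void )
    {
        return MPU_RASR_AP_READ_WRITE |
               MPU_RASR_XN |            /* The flag this commit adds to the RAM and stack regions. */
               MPU_RASR_SIZE( 15UL ) |  /* 2^15 = 32 KB. */
               MPU_RASR_ENABLE;
    }

With XN set, an instruction fetch from these regions raises a MemManage fault, so code injected into kernel-managed RAM can no longer be executed in place, while the read-only flash regions remain the only regions the kernel itself configures as executable.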
Signed-off-by: Gaurav Aggarwal --- portable/GCC/ARM_CM3_MPU/port.c | 5 ++++- portable/GCC/ARM_CM4_MPU/port.c | 5 ++++- portable/IAR/ARM_CM4F_MPU/port.c | 5 ++++- portable/RVDS/ARM_CM4_MPU/port.c | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index c3fa9989d6c..ac7bd666e3d 100644 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -662,6 +662,7 @@ static void prvSetupMPU( void ) portMPU_REGION_ATTRIBUTE_REG = ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | ( portMPU_REGION_CACHEABLE_BUFFERABLE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_data_end__ - ( uint32_t ) __privileged_data_start__ ) | ( portMPU_REGION_ENABLE ); @@ -761,6 +762,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | ( portMPU_REGION_CACHEABLE_BUFFERABLE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); @@ -786,7 +788,8 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = - ( portMPU_REGION_READ_WRITE ) | /* Read and write. */ + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( portMPU_REGION_CACHEABLE_BUFFERABLE ) | ( portMPU_REGION_ENABLE ); diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index a8609251d1d..ef019eda1aa 100644 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -772,6 +772,7 @@ static void prvSetupMPU( void ) ( portPRIVILEGED_RAM_REGION ); portMPU_REGION_ATTRIBUTE_REG = ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_data_end__ - ( uint32_t ) __privileged_data_start__ ) | ( portMPU_REGION_ENABLE ); @@ -883,6 +884,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); @@ -909,7 +911,8 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = - ( portMPU_REGION_READ_WRITE ) | /* Read and write. 
*/ + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 27db7d8e605..eaf6c2d744a 100644 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -591,6 +591,7 @@ static void prvSetupMPU( void ) ( portPRIVILEGED_RAM_REGION ); portMPU_REGION_ATTRIBUTE_REG = ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_data_end__ - ( uint32_t ) __privileged_data_start__ ) | ( portMPU_REGION_ENABLE ); @@ -660,6 +661,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); @@ -686,7 +688,8 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = - ( portMPU_REGION_READ_WRITE ) | /* Read and write. */ + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index 9f58dbdfc1a..340acacbec3 100644 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -773,6 +773,7 @@ static void prvSetupMPU( void ) ( portPRIVILEGED_RAM_REGION ); portMPU_REGION_ATTRIBUTE_REG = ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_data_end__ - ( uint32_t ) __privileged_data_start__ ) | ( portMPU_REGION_ENABLE ); @@ -872,6 +873,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); @@ -898,7 +900,8 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = - ( portMPU_REGION_READ_WRITE ) | /* Read and write. 
*/ + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ) | ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); From 68869210d73faabdcb70497968101a05567b884f Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Wed, 7 Sep 2022 14:57:37 +0530 Subject: [PATCH 093/164] Remove local stack variable form MPU wrappers It was possible for a third party that had already independently gained the ability to execute injected code to achieve further privilege escalation by branching directly inside a FreeRTOS MPU API wrapper function with a manually crafted stack frame. This commit removes the local stack variable `xRunningPrivileged` so that a manually crafted stack frame cannot be used for privilege escalation by branching directly inside a FreeRTOS MPU API wrapper. We thank Certibit Consulting, LLC, Huazhong University of Science and Technology and the SecLab team at Northeastern University for reporting this issue. Signed-off-by: Gaurav Aggarwal --- .github/lexicon.txt | 2 - include/mpu_wrappers.h | 30 - portable/Common/mpu_wrappers.c | 1907 +++++++++++++++++++++++------- portable/GCC/ARM_CM3_MPU/port.c | 56 +- portable/GCC/ARM_CM4_MPU/port.c | 56 +- portable/IAR/ARM_CM4F_MPU/port.c | 76 +- portable/RVDS/ARM_CM4_MPU/port.c | 56 +- 7 files changed, 1693 insertions(+), 490 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 68a28eb0c5f..6e04c246c3c 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -2547,7 +2547,6 @@ vportgetheapstats vportinitialiseblocks vportisrstartfirststask vportraisebasepri -vportresetprivilege vportsetmpuregistersetone vportsetuptimerinterrupt vportstartfirststask @@ -2872,7 +2871,6 @@ xperiod xportgetcoreid xportgetfreeheapsize xportinstallinterrupthandler -xportraiseprivilege xportregistercinterrupthandler xportregisterdump xportstartfirsttask diff --git a/include/mpu_wrappers.h b/include/mpu_wrappers.h index 750d0307db6..5a26113050a 100644 --- a/include/mpu_wrappers.h +++ b/include/mpu_wrappers.h @@ -173,36 +173,6 @@ #define PRIVILEGED_DATA __attribute__( ( section( "privileged_data" ) ) ) #define FREERTOS_SYSTEM_CALL __attribute__( ( section( "freertos_system_calls" ) ) ) -/** - * @brief Calls the port specific code to raise the privilege. - * - * Sets xRunningPrivileged to pdFALSE if privilege was raised, else sets - * it to pdTRUE. - */ - #define xPortRaisePrivilege( xRunningPrivileged ) \ - { \ - /* Check whether the processor is already privileged. */ \ - ( xRunningPrivileged ) = portIS_PRIVILEGED(); \ - \ - /* If the processor is not already privileged, raise privilege. */ \ - if( ( xRunningPrivileged ) == pdFALSE ) \ - { \ - portRAISE_PRIVILEGE(); \ - } \ - } - -/** - * @brief If xRunningPrivileged is not pdTRUE, calls the port specific - * code to reset the privilege, otherwise does nothing. 
- */ - #define vPortResetPrivilege( xRunningPrivileged ) \ - { \ - if( ( xRunningPrivileged ) == pdFALSE ) \ - { \ - portRESET_PRIVILEGE(); \ - } \ - } - #endif /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ #else /* portUSING_MPU_WRAPPERS */ diff --git a/portable/Common/mpu_wrappers.c b/portable/Common/mpu_wrappers.c index 74990c3dab6..6207d388029 100644 --- a/portable/Common/mpu_wrappers.c +++ b/portable/Common/mpu_wrappers.c @@ -58,11 +58,23 @@ UBaseType_t uxPriority, TaskHandle_t * pxCreatedTask ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask ); + } return xReturn; } @@ -79,11 +91,22 @@ StaticTask_t * const pxTaskBuffer ) /* FREERTOS_SYSTEM_CALL */ { TaskHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer ); + } return xReturn; } @@ -93,24 +116,46 @@ #if ( INCLUDE_vTaskDelete == 1 ) void MPU_vTaskDelete( TaskHandle_t pxTaskToDelete ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vTaskDelete( pxTaskToDelete ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskDelete( pxTaskToDelete ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskDelete( pxTaskToDelete ); + } } - #endif + #endif /* if ( INCLUDE_vTaskDelete == 1 ) */ /*-----------------------------------------------------------*/ #if ( INCLUDE_xTaskDelayUntil == 1 ) BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, TickType_t xTimeIncrement ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged, xReturn; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ); + } return xReturn; } @@ -120,11 +165,23 @@ #if ( INCLUDE_xTaskAbortDelay == 1 ) BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t 
xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskAbortDelay( xTask ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xTaskAbortDelay( xTask ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskAbortDelay( xTask ); + } return xReturn; } @@ -134,24 +191,45 @@ #if ( INCLUDE_vTaskDelay == 1 ) void MPU_vTaskDelay( TickType_t xTicksToDelay ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskDelay( xTicksToDelay ); - vPortResetPrivilege( xRunningPrivileged ); + vTaskDelay( xTicksToDelay ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskDelay( xTicksToDelay ); + } } - #endif + #endif /* if ( INCLUDE_vTaskDelay == 1 ) */ /*-----------------------------------------------------------*/ #if ( INCLUDE_uxTaskPriorityGet == 1 ) UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t pxTask ) /* FREERTOS_SYSTEM_CALL */ { UBaseType_t uxReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - uxReturn = uxTaskPriorityGet( pxTask ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + uxReturn = uxTaskPriorityGet( pxTask ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + uxReturn = uxTaskPriorityGet( pxTask ); + } return uxReturn; } @@ -162,11 +240,21 @@ void MPU_vTaskPrioritySet( TaskHandle_t pxTask, UBaseType_t uxNewPriority ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vTaskPrioritySet( pxTask, uxNewPriority ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskPrioritySet( pxTask, uxNewPriority ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskPrioritySet( pxTask, uxNewPriority ); + } } #endif /* if ( INCLUDE_vTaskPrioritySet == 1 ) */ /*-----------------------------------------------------------*/ @@ -175,11 +263,22 @@ eTaskState MPU_eTaskGetState( TaskHandle_t pxTask ) /* FREERTOS_SYSTEM_CALL */ { eTaskState eReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - eReturn = eTaskGetState( pxTask ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + eReturn = eTaskGetState( pxTask ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + eReturn = eTaskGetState( pxTask ); + } return eReturn; } @@ -192,11 +291,21 @@ BaseType_t xGetFreeStackSpace, eTaskState eState ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState ); - vPortResetPrivilege( xRunningPrivileged ); + vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskGetInfo( xTask, pxTaskStatus, 
xGetFreeStackSpace, eState ); + } } #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ /*-----------------------------------------------------------*/ @@ -205,11 +314,21 @@ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ { TaskHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGetIdleTaskHandle(); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + xReturn = xTaskGetIdleTaskHandle(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskGetIdleTaskHandle(); + } return xReturn; } @@ -219,44 +338,86 @@ #if ( INCLUDE_vTaskSuspend == 1 ) void MPU_vTaskSuspend( TaskHandle_t pxTaskToSuspend ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskSuspend( pxTaskToSuspend ); - vPortResetPrivilege( xRunningPrivileged ); + vTaskSuspend( pxTaskToSuspend ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskSuspend( pxTaskToSuspend ); + } } - #endif + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ /*-----------------------------------------------------------*/ #if ( INCLUDE_vTaskSuspend == 1 ) void MPU_vTaskResume( TaskHandle_t pxTaskToResume ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vTaskResume( pxTaskToResume ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskResume( pxTaskToResume ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskResume( pxTaskToResume ); + } } - #endif + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ /*-----------------------------------------------------------*/ void MPU_vTaskSuspendAll( void ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vTaskSuspendAll(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskSuspendAll(); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskSuspendAll(); + } } /*-----------------------------------------------------------*/ BaseType_t MPU_xTaskResumeAll( void ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskResumeAll(); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xTaskResumeAll(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskResumeAll(); + } return xReturn; } @@ -265,11 +426,22 @@ TickType_t MPU_xTaskGetTickCount( void ) /* FREERTOS_SYSTEM_CALL */ { TickType_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGetTickCount(); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskGetTickCount(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = 
xTaskGetTickCount(); + } return xReturn; } @@ -278,11 +450,22 @@ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* FREERTOS_SYSTEM_CALL */ { UBaseType_t uxReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - uxReturn = uxTaskGetNumberOfTasks(); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + uxReturn = uxTaskGetNumberOfTasks(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + uxReturn = uxTaskGetNumberOfTasks(); + } return uxReturn; } @@ -291,11 +474,22 @@ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* FREERTOS_SYSTEM_CALL */ { char * pcReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - pcReturn = pcTaskGetName( xTaskToQuery ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + pcReturn = pcTaskGetName( xTaskToQuery ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + pcReturn = pcTaskGetName( xTaskToQuery ); + } return pcReturn; } @@ -305,11 +499,22 @@ TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) /* FREERTOS_SYSTEM_CALL */ { TaskHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGetHandle( pcNameToQuery ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskGetHandle( pcNameToQuery ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskGetHandle( pcNameToQuery ); + } return xReturn; } @@ -319,36 +524,67 @@ #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) void MPU_vTaskList( char * pcWriteBuffer ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskList( pcWriteBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + vTaskList( pcWriteBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskList( pcWriteBuffer ); + } } - #endif + #endif /* if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ /*-----------------------------------------------------------*/ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) void MPU_vTaskGetRunTimeStats( char * pcWriteBuffer ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vTaskGetRunTimeStats( pcWriteBuffer ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskGetRunTimeStats( pcWriteBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskGetRunTimeStats( pcWriteBuffer ); + } } - #endif + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ /*-----------------------------------------------------------*/ #if ( ( 
configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* FREERTOS_SYSTEM_CALL */ { configRUN_TIME_COUNTER_TYPE xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = ulTaskGetIdleRunTimePercent(); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = ulTaskGetIdleRunTimePercent(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = ulTaskGetIdleRunTimePercent(); + } return xReturn; } @@ -359,11 +595,22 @@ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* FREERTOS_SYSTEM_CALL */ { configRUN_TIME_COUNTER_TYPE xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = ulTaskGetIdleRunTimeCounter(); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = ulTaskGetIdleRunTimeCounter(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = ulTaskGetIdleRunTimeCounter(); + } return xReturn; } @@ -374,11 +621,21 @@ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxTagValue ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vTaskSetApplicationTaskTag( xTask, pxTagValue ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskSetApplicationTaskTag( xTask, pxTagValue ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskSetApplicationTaskTag( xTask, pxTagValue ); + } } #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ /*-----------------------------------------------------------*/ @@ -387,11 +644,22 @@ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ { TaskHookFunction_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGetApplicationTaskTag( xTask ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskGetApplicationTaskTag( xTask ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskGetApplicationTaskTag( xTask ); + } return xReturn; } @@ -403,11 +671,21 @@ BaseType_t xIndex, void * pvValue ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue ); - vPortResetPrivilege( xRunningPrivileged ); + vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue ); + } } #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ /*-----------------------------------------------------------*/ @@ -417,11 +695,22 @@ BaseType_t xIndex ) /* FREERTOS_SYSTEM_CALL */ { void * pvReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - pvReturn = 
pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + pvReturn = pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + pvReturn = pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex ); + } return pvReturn; } @@ -432,11 +721,23 @@ BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, void * pvParameter ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskCallApplicationTaskHook( xTask, pvParameter ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xTaskCallApplicationTaskHook( xTask, pvParameter ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskCallApplicationTaskHook( xTask, pvParameter ); + } return xReturn; } @@ -449,11 +750,22 @@ configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) /* FREERTOS_SYSTEM_CALL */ { UBaseType_t uxReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - uxReturn = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + uxReturn = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + uxReturn = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime ); + } return uxReturn; } @@ -462,11 +774,23 @@ BaseType_t MPU_xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskCatchUpTicks( xTicksToCatchUp ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xTaskCatchUpTicks( xTicksToCatchUp ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskCatchUpTicks( xTicksToCatchUp ); + } return xReturn; } @@ -476,11 +800,22 @@ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ { UBaseType_t uxReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - uxReturn = uxTaskGetStackHighWaterMark( xTask ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + uxReturn = uxTaskGetStackHighWaterMark( xTask ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + uxReturn = uxTaskGetStackHighWaterMark( xTask ); + } return uxReturn; } @@ -491,11 +826,22 @@ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ { configSTACK_DEPTH_TYPE uxReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - uxReturn = uxTaskGetStackHighWaterMark2( xTask ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + uxReturn = 
uxTaskGetStackHighWaterMark2( xTask ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + uxReturn = uxTaskGetStackHighWaterMark2( xTask ); + } return uxReturn; } @@ -506,11 +852,21 @@ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ { TaskHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGetCurrentTaskHandle(); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + xReturn = xTaskGetCurrentTaskHandle(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskGetCurrentTaskHandle(); + } return xReturn; } @@ -520,11 +876,23 @@ #if ( INCLUDE_xTaskGetSchedulerState == 1 ) BaseType_t MPU_xTaskGetSchedulerState( void ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGetSchedulerState(); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xTaskGetSchedulerState(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskGetSchedulerState(); + } return xReturn; } @@ -533,22 +901,44 @@ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTaskSetTimeOutState( pxTimeOut ); - vPortResetPrivilege( xRunningPrivileged ); + vTaskSetTimeOutState( pxTimeOut ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTaskSetTimeOutState( pxTimeOut ); + } } /*-----------------------------------------------------------*/ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait ); + } return xReturn; } @@ -561,11 +951,23 @@ eNotifyAction eAction, uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue ); + } return xReturn; } @@ -579,11 +981,23 @@ uint32_t * pulNotificationValue, TickType_t 
xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ); + } return xReturn; } @@ -596,11 +1010,22 @@ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { uint32_t ulReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - ulReturn = ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + ulReturn = ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + ulReturn = ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait ); + } return ulReturn; } @@ -611,11 +1036,23 @@ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, UBaseType_t uxIndexToClear ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTaskGenericNotifyStateClear( xTask, uxIndexToClear ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTaskGenericNotifyStateClear( xTask, uxIndexToClear ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTaskGenericNotifyStateClear( xTask, uxIndexToClear ); + } return xReturn; } @@ -628,11 +1065,22 @@ uint32_t ulBitsToClear ) /* FREERTOS_SYSTEM_CALL */ { uint32_t ulReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - ulReturn = ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + ulReturn = ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + ulReturn = ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear ); + } return ulReturn; } @@ -645,11 +1093,22 @@ uint8_t ucQueueType ) /* FREERTOS_SYSTEM_CALL */ { QueueHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType ); + } return xReturn; } @@ -664,11 
+1123,22 @@ const uint8_t ucQueueType ) /* FREERTOS_SYSTEM_CALL */ { QueueHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType ); + } return xReturn; } @@ -678,11 +1148,23 @@ BaseType_t MPU_xQueueGenericReset( QueueHandle_t pxQueue, BaseType_t xNewQueue ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueGenericReset( pxQueue, xNewQueue ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xQueueGenericReset( pxQueue, xNewQueue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueGenericReset( pxQueue, xNewQueue ); + } return xReturn; } @@ -693,11 +1175,23 @@ TickType_t xTicksToWait, BaseType_t xCopyPosition ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition ); + } return xReturn; } @@ -706,11 +1200,22 @@ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t pxQueue ) /* FREERTOS_SYSTEM_CALL */ { UBaseType_t uxReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - uxReturn = uxQueueMessagesWaiting( pxQueue ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + uxReturn = uxQueueMessagesWaiting( pxQueue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + uxReturn = uxQueueMessagesWaiting( pxQueue ); + } return uxReturn; } @@ -719,11 +1224,22 @@ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ { UBaseType_t uxReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - uxReturn = uxQueueSpacesAvailable( xQueue ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + uxReturn = uxQueueSpacesAvailable( xQueue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + uxReturn = uxQueueSpacesAvailable( xQueue ); + } return uxReturn; } @@ -733,11 +1249,23 @@ void * const pvBuffer, TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( 
portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueReceive( pxQueue, pvBuffer, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xQueueReceive( pxQueue, pvBuffer, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueReceive( pxQueue, pvBuffer, xTicksToWait ); + } return xReturn; } @@ -747,11 +1275,23 @@ void * const pvBuffer, TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueuePeek( xQueue, pvBuffer, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xQueuePeek( xQueue, pvBuffer, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueuePeek( xQueue, pvBuffer, xTicksToWait ); + } return xReturn; } @@ -760,11 +1300,23 @@ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueSemaphoreTake( xQueue, xTicksToWait ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueSemaphoreTake( xQueue, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueSemaphoreTake( xQueue, xTicksToWait ); + } return xReturn; } @@ -774,11 +1326,22 @@ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* FREERTOS_SYSTEM_CALL */ { void * xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueGetMutexHolder( xSemaphore ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueGetMutexHolder( xSemaphore ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueGetMutexHolder( xSemaphore ); + } return xReturn; } @@ -789,11 +1352,22 @@ QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) /* FREERTOS_SYSTEM_CALL */ { QueueHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueCreateMutex( ucQueueType ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueCreateMutex( ucQueueType ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueCreateMutex( ucQueueType ); + } return xReturn; } @@ -805,11 +1379,22 @@ StaticQueue_t * pxStaticQueue ) /* FREERTOS_SYSTEM_CALL */ { QueueHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueCreateMutexStatic( ucQueueType, pxStaticQueue ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueCreateMutexStatic( ucQueueType, pxStaticQueue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = 
xQueueCreateMutexStatic( ucQueueType, pxStaticQueue ); + } return xReturn; } @@ -821,11 +1406,22 @@ UBaseType_t uxInitialCount ) /* FREERTOS_SYSTEM_CALL */ { QueueHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueCreateCountingSemaphore( uxCountValue, uxInitialCount ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueCreateCountingSemaphore( uxCountValue, uxInitialCount ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueCreateCountingSemaphore( uxCountValue, uxInitialCount ); + } return xReturn; } @@ -839,11 +1435,22 @@ StaticQueue_t * pxStaticQueue ) /* FREERTOS_SYSTEM_CALL */ { QueueHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue ); + } return xReturn; } @@ -854,11 +1461,23 @@ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xBlockTime ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueTakeMutexRecursive( xMutex, xBlockTime ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueTakeMutexRecursive( xMutex, xBlockTime ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueTakeMutexRecursive( xMutex, xBlockTime ); + } return xReturn; } @@ -868,11 +1487,23 @@ #if ( configUSE_RECURSIVE_MUTEXES == 1 ) BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t xMutex ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueGiveMutexRecursive( xMutex ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xQueueGiveMutexRecursive( xMutex ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueGiveMutexRecursive( xMutex ); + } return xReturn; } @@ -883,11 +1514,22 @@ QueueSetHandle_t MPU_xQueueCreateSet( UBaseType_t uxEventQueueLength ) /* FREERTOS_SYSTEM_CALL */ { QueueSetHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueCreateSet( uxEventQueueLength ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueCreateSet( uxEventQueueLength ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueCreateSet( uxEventQueueLength ); + } return xReturn; } @@ -899,11 +1541,22 @@ TickType_t xBlockTimeTicks ) /* FREERTOS_SYSTEM_CALL */ { QueueSetMemberHandle_t xReturn; - BaseType_t 
xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueSelectFromSet( xQueueSet, xBlockTimeTicks ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueSelectFromSet( xQueueSet, xBlockTimeTicks ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueSelectFromSet( xQueueSet, xBlockTimeTicks ); + } return xReturn; } @@ -914,11 +1567,23 @@ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueAddToSet( xQueueOrSemaphore, xQueueSet ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueAddToSet( xQueueOrSemaphore, xQueueSet ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueAddToSet( xQueueOrSemaphore, xQueueSet ); + } return xReturn; } @@ -929,11 +1594,23 @@ BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet ); + } return xReturn; } @@ -944,11 +1621,21 @@ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char * pcName ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vQueueAddToRegistry( xQueue, pcName ); - vPortResetPrivilege( xRunningPrivileged ); + vQueueAddToRegistry( xQueue, pcName ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vQueueAddToRegistry( xQueue, pcName ); + } } #endif /* if configQUEUE_REGISTRY_SIZE > 0 */ /*-----------------------------------------------------------*/ @@ -956,11 +1643,21 @@ #if configQUEUE_REGISTRY_SIZE > 0 void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vQueueUnregisterQueue( xQueue ); - vPortResetPrivilege( xRunningPrivileged ); + vQueueUnregisterQueue( xQueue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vQueueUnregisterQueue( xQueue ); + } } #endif /* if configQUEUE_REGISTRY_SIZE > 0 */ /*-----------------------------------------------------------*/ @@ -969,11 +1666,22 @@ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ { const char * pcReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - pcReturn = pcQueueGetName( xQueue ); - vPortResetPrivilege( xRunningPrivileged ); + if( 
portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + pcReturn = pcQueueGetName( xQueue ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + pcReturn = pcQueueGetName( xQueue ); + } return pcReturn; } @@ -982,11 +1690,21 @@ void MPU_vQueueDelete( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vQueueDelete( xQueue ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vQueueDelete( xQueue ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vQueueDelete( xQueue ); + } } /*-----------------------------------------------------------*/ @@ -998,11 +1716,22 @@ TimerCallbackFunction_t pxCallbackFunction ) /* FREERTOS_SYSTEM_CALL */ { TimerHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction ); + } return xReturn; } @@ -1018,11 +1747,22 @@ StaticTimer_t * pxTimerBuffer ) /* FREERTOS_SYSTEM_CALL */ { TimerHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxTimerBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxTimerBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxTimerBuffer ); + } return xReturn; } @@ -1033,11 +1773,22 @@ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ { void * pvReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - pvReturn = pvTimerGetTimerID( xTimer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + pvReturn = pvTimerGetTimerID( xTimer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + pvReturn = pvTimerGetTimerID( xTimer ); + } return pvReturn; } @@ -1048,11 +1799,21 @@ void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void * pvNewID ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vTimerSetTimerID( xTimer, pvNewID ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTimerSetTimerID( xTimer, pvNewID ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTimerSetTimerID( xTimer, pvNewID ); + } } 
#endif /* if ( configUSE_TIMERS == 1 ) */ /*-----------------------------------------------------------*/ @@ -1060,11 +1821,23 @@ #if ( configUSE_TIMERS == 1 ) BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTimerIsTimerActive( xTimer ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTimerIsTimerActive( xTimer ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTimerIsTimerActive( xTimer ); + } return xReturn; } @@ -1075,11 +1848,22 @@ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ { TaskHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTimerGetTimerDaemonTaskHandle(); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTimerGetTimerDaemonTaskHandle(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTimerGetTimerDaemonTaskHandle(); + } return xReturn; } @@ -1092,11 +1876,23 @@ uint32_t ulParameter2, TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTimerPendFunctionCall( xFunctionToPend, pvParameter1, ulParameter2, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xTimerPendFunctionCall( xFunctionToPend, pvParameter1, ulParameter2, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTimerPendFunctionCall( xFunctionToPend, pvParameter1, ulParameter2, xTicksToWait ); + } return xReturn; } @@ -1107,11 +1903,21 @@ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vTimerSetReloadMode( xTimer, uxAutoReload ); - vPortResetPrivilege( xRunningPrivileged ); + vTimerSetReloadMode( xTimer, uxAutoReload ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vTimerSetReloadMode( xTimer, uxAutoReload ); + } } #endif /* if ( configUSE_TIMERS == 1 ) */ /*-----------------------------------------------------------*/ @@ -1120,11 +1926,22 @@ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) { UBaseType_t uxReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - uxReturn = uxTimerGetReloadMode( xTimer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + uxReturn = uxTimerGetReloadMode( xTimer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + uxReturn = uxTimerGetReloadMode( xTimer ); + } return uxReturn; } @@ -1135,11 +1952,22 @@ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ { const char * pcReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( 
xRunningPrivileged ); - pcReturn = pcTimerGetName( xTimer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + pcReturn = pcTimerGetName( xTimer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + pcReturn = pcTimerGetName( xTimer ); + } return pcReturn; } @@ -1150,11 +1978,22 @@ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ { TickType_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTimerGetPeriod( xTimer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTimerGetPeriod( xTimer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTimerGetPeriod( xTimer ); + } return xReturn; } @@ -1165,11 +2004,22 @@ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ { TickType_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTimerGetExpiryTime( xTimer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTimerGetExpiryTime( xTimer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTimerGetExpiryTime( xTimer ); + } return xReturn; } @@ -1184,11 +2034,22 @@ const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { BaseType_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xTimerGenericCommand( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xTimerGenericCommand( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xTimerGenericCommand( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ); + } return xReturn; } @@ -1199,11 +2060,22 @@ EventGroupHandle_t MPU_xEventGroupCreate( void ) /* FREERTOS_SYSTEM_CALL */ { EventGroupHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xEventGroupCreate(); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xEventGroupCreate(); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xEventGroupCreate(); + } return xReturn; } @@ -1214,11 +2086,22 @@ EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) /* FREERTOS_SYSTEM_CALL */ { EventGroupHandle_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xEventGroupCreateStatic( pxEventGroupBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xEventGroupCreateStatic( pxEventGroupBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xEventGroupCreateStatic( pxEventGroupBuffer ); + } return 
xReturn; } @@ -1232,11 +2115,22 @@ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { EventBits_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xEventGroupWaitBits( xEventGroup, uxBitsToWaitFor, xClearOnExit, xWaitForAllBits, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xEventGroupWaitBits( xEventGroup, uxBitsToWaitFor, xClearOnExit, xWaitForAllBits, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xEventGroupWaitBits( xEventGroup, uxBitsToWaitFor, xClearOnExit, xWaitForAllBits, xTicksToWait ); + } return xReturn; } @@ -1246,11 +2140,22 @@ const EventBits_t uxBitsToClear ) /* FREERTOS_SYSTEM_CALL */ { EventBits_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xEventGroupClearBits( xEventGroup, uxBitsToClear ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xEventGroupClearBits( xEventGroup, uxBitsToClear ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xEventGroupClearBits( xEventGroup, uxBitsToClear ); + } return xReturn; } @@ -1260,11 +2165,22 @@ const EventBits_t uxBitsToSet ) /* FREERTOS_SYSTEM_CALL */ { EventBits_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xEventGroupSetBits( xEventGroup, uxBitsToSet ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xEventGroupSetBits( xEventGroup, uxBitsToSet ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xEventGroupSetBits( xEventGroup, uxBitsToSet ); + } return xReturn; } @@ -1276,11 +2192,22 @@ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { EventBits_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xEventGroupSync( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xEventGroupSync( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xEventGroupSync( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTicksToWait ); + } return xReturn; } @@ -1288,11 +2215,21 @@ void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + vEventGroupDelete( xEventGroup ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vEventGroupDelete( xEventGroup ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vEventGroupDelete( xEventGroup ); + } } /*-----------------------------------------------------------*/ @@ -1302,11 +2239,22 @@ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { size_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferSend( xStreamBuffer, pvTxData, xDataLengthBytes, 
xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferSend( xStreamBuffer, pvTxData, xDataLengthBytes, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferSend( xStreamBuffer, pvTxData, xDataLengthBytes, xTicksToWait ); + } return xReturn; } @@ -1315,11 +2263,22 @@ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ { size_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferNextMessageLengthBytes( xStreamBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferNextMessageLengthBytes( xStreamBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferNextMessageLengthBytes( xStreamBuffer ); + } return xReturn; } @@ -1331,11 +2290,22 @@ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ { size_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferReceive( xStreamBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferReceive( xStreamBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferReceive( xStreamBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ); + } return xReturn; } @@ -1343,21 +2313,43 @@ void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xRunningPrivileged; + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - vStreamBufferDelete( xStreamBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + vStreamBufferDelete( xStreamBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + vStreamBufferDelete( xStreamBuffer ); + } } /*-----------------------------------------------------------*/ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferIsFull( xStreamBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferIsFull( xStreamBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferIsFull( xStreamBuffer ); + } return xReturn; } @@ -1365,11 +2357,23 @@ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferIsEmpty( xStreamBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + xReturn = xStreamBufferIsEmpty( xStreamBuffer ); + 
portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferIsEmpty( xStreamBuffer ); + } return xReturn; } @@ -1377,11 +2381,23 @@ BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferReset( xStreamBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferReset( xStreamBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferReset( xStreamBuffer ); + } return xReturn; } @@ -1390,11 +2406,21 @@ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ { size_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferSpacesAvailable( xStreamBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + xReturn = xStreamBufferSpacesAvailable( xStreamBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferSpacesAvailable( xStreamBuffer ); + } return xReturn; } @@ -1403,11 +2429,22 @@ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ { size_t xReturn; - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferBytesAvailable( xStreamBuffer ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferBytesAvailable( xStreamBuffer ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferBytesAvailable( xStreamBuffer ); + } return xReturn; } @@ -1416,11 +2453,23 @@ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) /* FREERTOS_SYSTEM_CALL */ { - BaseType_t xReturn, xRunningPrivileged; + BaseType_t xReturn; + + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferSetTriggerLevel( xStreamBuffer, xTriggerLevel ); + portMEMORY_BARRIER(); - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferSetTriggerLevel( xStreamBuffer, xTriggerLevel ); - vPortResetPrivilege( xRunningPrivileged ); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferSetTriggerLevel( xStreamBuffer, xTriggerLevel ); + } return xReturn; } @@ -1434,7 +2483,6 @@ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* FREERTOS_SYSTEM_CALL */ { StreamBufferHandle_t xReturn; - BaseType_t xRunningPrivileged; /** * Streambuffer application level callback functionality is disabled for MPU @@ -1446,13 +2494,29 @@ if( ( pxSendCompletedCallback == NULL ) && ( pxReceiveCompletedCallback == NULL ) ) { - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferGenericCreate( xBufferSizeBytes, - xTriggerLevelBytes, - xIsMessageBuffer, - NULL, - NULL ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferGenericCreate( 
xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + NULL, + NULL ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferGenericCreate( xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + NULL, + NULL ); + } } else { @@ -1475,7 +2539,6 @@ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* FREERTOS_SYSTEM_CALL */ { StreamBufferHandle_t xReturn; - BaseType_t xRunningPrivileged; /** * Streambuffer application level callback functionality is disabled for MPU @@ -1487,15 +2550,33 @@ if( ( pxSendCompletedCallback == NULL ) && ( pxReceiveCompletedCallback == NULL ) ) { - xPortRaisePrivilege( xRunningPrivileged ); - xReturn = xStreamBufferGenericCreateStatic( xBufferSizeBytes, - xTriggerLevelBytes, - xIsMessageBuffer, - pucStreamBufferStorageArea, - pxStaticStreamBuffer, - NULL, - NULL ); - vPortResetPrivilege( xRunningPrivileged ); + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + xReturn = xStreamBufferGenericCreateStatic( xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + pucStreamBufferStorageArea, + pxStaticStreamBuffer, + NULL, + NULL ); + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + xReturn = xStreamBufferGenericCreateStatic( xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + pucStreamBufferStorageArea, + pxStaticStreamBuffer, + NULL, + NULL ); + } } else { @@ -1517,11 +2598,21 @@ * void MPU_FunctionName( [parameters ] ) FREERTOS_SYSTEM_CALL; * void MPU_FunctionName( [parameters ] ) * { - * BaseType_t xRunningPrivileged; + * if( portIS_PRIVILEGED() == pdFALSE ) + * { + * portRAISE_PRIVILEGE(); + * portMEMORY_BARRIER(); + * + * FunctionName( [parameters ] ); + * portMEMORY_BARRIER(); * - * xPortRaisePrivilege( xRunningPrivileged ); - * FunctionName( [parameters ] ); - * vPortResetPrivilege( xRunningPrivileged ); + * portRESET_PRIVILEGE(); + * portMEMORY_BARRIER(); + * } + * else + * { + * FunctionName( [parameters ] ); + * } * } */ diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index ac7bd666e3d..7b61127a2e9 100644 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -495,15 +495,26 @@ void vPortEndScheduler( void ) void vPortEnterCritical( void ) { #if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); -#endif + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + } +#else portDISABLE_INTERRUPTS(); uxCriticalNesting++; - -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - vPortResetPrivilege( xRunningPrivileged ); #endif } /*-----------------------------------------------------------*/ @@ -511,10 +522,34 @@ void vPortEnterCritical( void ) void vPortExitCritical( void ) { #if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); -#endif + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + 
configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + } +#else configASSERT( uxCriticalNesting ); uxCriticalNesting--; @@ -522,9 +557,6 @@ void vPortExitCritical( void ) { portENABLE_INTERRUPTS(); } - -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - vPortResetPrivilege( xRunningPrivileged ); #endif } /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index ef019eda1aa..97bef2be8ba 100644 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -549,15 +549,26 @@ void vPortEndScheduler( void ) void vPortEnterCritical( void ) { #if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); -#endif + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + } +#else portDISABLE_INTERRUPTS(); uxCriticalNesting++; - -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - vPortResetPrivilege( xRunningPrivileged ); #endif } /*-----------------------------------------------------------*/ @@ -565,10 +576,34 @@ void vPortEnterCritical( void ) void vPortExitCritical( void ) { #if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); -#endif + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + } +#else configASSERT( uxCriticalNesting ); uxCriticalNesting--; @@ -576,9 +611,6 @@ void vPortExitCritical( void ) { portENABLE_INTERRUPTS(); } - -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - vPortResetPrivilege( xRunningPrivileged ); #endif } /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index eaf6c2d744a..3a9eb7ba446 100644 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -464,13 +464,44 @@ void vPortEndScheduler( void ) void vPortEnterCritical( void ) { #if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); -#endif + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + /* This is not the interrupt safe version of the enter critical function so + * assert() if it is being called from an interrupt context. Only API + * functions that end in "FromISR" can be used in an interrupt. Only assert if + * the critical nesting count is 1 to protect against recursive calls if the + * assert function also uses a critical section. 
*/ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } + portMEMORY_BARRIER(); + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + /* This is not the interrupt safe version of the enter critical function so + * assert() if it is being called from an interrupt context. Only API + * functions that end in "FromISR" can be used in an interrupt. Only assert if + * the critical nesting count is 1 to protect against recursive calls if the + * assert function also uses a critical section. */ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } + } +#else portDISABLE_INTERRUPTS(); uxCriticalNesting++; - /* This is not the interrupt safe version of the enter critical function so * assert() if it is being called from an interrupt context. Only API * functions that end in "FromISR" can be used in an interrupt. Only assert if @@ -480,9 +511,6 @@ void vPortEnterCritical( void ) { configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); } - -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - vPortResetPrivilege( xRunningPrivileged ); #endif } /*-----------------------------------------------------------*/ @@ -490,21 +518,41 @@ void vPortEnterCritical( void ) void vPortExitCritical( void ) { #if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); -#endif + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - configASSERT( uxCriticalNesting ); + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + } +#else + configASSERT( uxCriticalNesting ); uxCriticalNesting--; if( uxCriticalNesting == 0 ) { portENABLE_INTERRUPTS(); } - -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - vPortResetPrivilege( xRunningPrivileged ); #endif } /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index 340acacbec3..11f5b8b80f9 100644 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -551,15 +551,26 @@ void vPortEndScheduler( void ) void vPortEnterCritical( void ) { #if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); -#endif + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + } +#else portDISABLE_INTERRUPTS(); uxCriticalNesting++; - -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - vPortResetPrivilege( xRunningPrivileged ); #endif } /*-----------------------------------------------------------*/ @@ -567,10 +578,34 @@ void vPortEnterCritical( void ) void vPortExitCritical( void ) { #if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - BaseType_t xRunningPrivileged; - xPortRaisePrivilege( xRunningPrivileged ); -#endif + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + 
portMEMORY_BARRIER(); + + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + } +#else configASSERT( uxCriticalNesting ); uxCriticalNesting--; @@ -578,9 +613,6 @@ void vPortExitCritical( void ) { portENABLE_INTERRUPTS(); } - -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - vPortResetPrivilege( xRunningPrivileged ); #endif } /*-----------------------------------------------------------*/ From 54faa4894269b8ece00c2a5cfa70e8832e4a44d7 Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Wed, 7 Sep 2022 14:58:52 +0530 Subject: [PATCH 094/164] Restrict unpriv task to invoke code with privilege It was possible for an unprivileged task to invoke any function with privilege by passing it as a parameter to MPU_xTaskCreate, MPU_xTaskCreateStatic, MPU_xTimerCreate, MPU_xTimerCreateStatic, or MPU_xTimerPendFunctionCall. This commit ensures that MPU_xTaskCreate and MPU_xTaskCreateStatic can only create unprivileged tasks. It also removes the following APIs: 1. MPU_xTimerCreate 2. MPU_xTimerCreateStatic 3. MPU_xTimerPendFunctionCall We thank Huazhong University of Science and Technology for reporting this issue. Signed-off-by: Gaurav Aggarwal --- include/mpu_wrappers.h | 3 -- portable/Common/mpu_wrappers.c | 96 +++------------------------------- 2 files changed, 6 insertions(+), 93 deletions(-) diff --git a/include/mpu_wrappers.h b/include/mpu_wrappers.h index 5a26113050a..b355ae44a41 100644 --- a/include/mpu_wrappers.h +++ b/include/mpu_wrappers.h @@ -120,13 +120,10 @@ #endif /* Map standard timer.h API functions to the MPU equivalents. 
*/ - #define xTimerCreate MPU_xTimerCreate - #define xTimerCreateStatic MPU_xTimerCreateStatic #define pvTimerGetTimerID MPU_pvTimerGetTimerID #define vTimerSetTimerID MPU_vTimerSetTimerID #define xTimerIsTimerActive MPU_xTimerIsTimerActive #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle - #define xTimerPendFunctionCall MPU_xTimerPendFunctionCall #define pcTimerGetName MPU_pcTimerGetName #define vTimerSetReloadMode MPU_vTimerSetReloadMode #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode diff --git a/portable/Common/mpu_wrappers.c b/portable/Common/mpu_wrappers.c index 6207d388029..c8aed16dff9 100644 --- a/portable/Common/mpu_wrappers.c +++ b/portable/Common/mpu_wrappers.c @@ -65,6 +65,9 @@ portRAISE_PRIVILEGE(); portMEMORY_BARRIER(); + uxPriority = uxPriority & ~( portPRIVILEGE_BIT ); + portMEMORY_BARRIER(); + xReturn = xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask ); portMEMORY_BARRIER(); @@ -97,6 +100,9 @@ portRAISE_PRIVILEGE(); portMEMORY_BARRIER(); + uxPriority = uxPriority & ~( portPRIVILEGE_BIT ); + portMEMORY_BARRIER(); + xReturn = xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer ); portMEMORY_BARRIER(); @@ -1708,67 +1714,6 @@ } /*-----------------------------------------------------------*/ - #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) ) - TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, - const TickType_t xTimerPeriodInTicks, - const UBaseType_t uxAutoReload, - void * const pvTimerID, - TimerCallbackFunction_t pxCallbackFunction ) /* FREERTOS_SYSTEM_CALL */ - { - TimerHandle_t xReturn; - - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); - - xReturn = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction ); - portMEMORY_BARRIER(); - - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { - xReturn = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction ); - } - - return xReturn; - } - #endif /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) ) */ -/*-----------------------------------------------------------*/ - - #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) ) - TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, - const TickType_t xTimerPeriodInTicks, - const UBaseType_t uxAutoReload, - void * const pvTimerID, - TimerCallbackFunction_t pxCallbackFunction, - StaticTimer_t * pxTimerBuffer ) /* FREERTOS_SYSTEM_CALL */ - { - TimerHandle_t xReturn; - - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); - - xReturn = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxTimerBuffer ); - portMEMORY_BARRIER(); - - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { - xReturn = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxTimerBuffer ); - } - - return xReturn; - } - #endif /* if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) ) */ -/*-----------------------------------------------------------*/ - #if ( configUSE_TIMERS == 1 ) void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ { @@ -1870,35 +1815,6 @@ #endif /* if ( configUSE_TIMERS == 1 ) */ /*-----------------------------------------------------------*/ - 
#if ( ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) - BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, - void * pvParameter1, - uint32_t ulParameter2, - TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ - { - BaseType_t xReturn; - - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); - - xReturn = xTimerPendFunctionCall( xFunctionToPend, pvParameter1, ulParameter2, xTicksToWait ); - portMEMORY_BARRIER(); - - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { - xReturn = xTimerPendFunctionCall( xFunctionToPend, pvParameter1, ulParameter2, xTicksToWait ); - } - - return xReturn; - } - #endif /* if ( ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */ -/*-----------------------------------------------------------*/ - #if ( configUSE_TIMERS == 1 ) void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) /* FREERTOS_SYSTEM_CALL */ From 160f731900a08ae096f5d6a054f77e5d2de5734c Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Fri, 16 Sep 2022 16:34:04 +0530 Subject: [PATCH 095/164] Update History.txt Signed-off-by: Gaurav Aggarwal --- History.txt | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/History.txt b/History.txt index 02ebb6a1d49..56f679943e4 100644 --- a/History.txt +++ b/History.txt @@ -1,9 +1,34 @@ -Changes between FreeRTOS V10.4.6 and TBD +Changes between FreeRTOS V10.4.6 and FreeRTOS V10.5.0 released September 16 2022 Documentation and download available at https://www.FreeRTOS.org/ + + ARMv7-M and ARMv8-M MPU ports: It is possible for a third party that + already independently gained the ability to execute injected code to + read from or write to arbitrary addresses by passing a negative argument + as the xIndex parameter to pvTaskGetThreadLocalStoragePointer() or + vTaskSetThreadLocalStoragePointer respectively. + We thank Certibit Consulting, LLC for reporting this issue. + + ARMv7-M and ARMv8-M MPU ports: It is possible for an unprivileged task to + invoke any function with privilege by passing it as a parameter to + MPU_xTaskCreate, MPU_xTaskCreateStatic, MPU_xTimerCreate, + MPU_xTimerCreateStatic, or MPU_xTimerPendFunctionCall. + We thank Huazhong University of Science and Technology for reporting this issue. + + ARMv7-M and ARMv8-M MPU ports: It is possible for a third party that has + already independently gained the ability to execute injected code to + achieve further privilege escalation by branching directly inside a + FreeRTOS MPU API wrapper function with a manually crafted stack frame. + We thank Certibit Consulting, LLC, Huazhong University of Science and + Technology and the SecLab team at Northeastern University for reporting + this issue. + + ARMv7-M MPU ports: It is possible to configure overlapping memory + protection unit (MPU) regions such that an unprivileged task can access + privileged data. + We thank the SecLab team at Northeastern University for reporting this issue. + Add support for ARM Cortex-M55. + + Add support for ARM Cortex-M85. Contributed by @gbrtth. + Add vectored mode interrupt support to the RISC-V port. + + Add support for RV32E extension (Embedded Profile) in RISC-V GCC port. + Contributed by @Limoto. + Heap improvements: - Add a check to heap_2 to track if a memory block is allocated to the application or not. 
The MSB of the size field is used for this @@ -14,6 +39,8 @@ Documentation and download available at https://www.FreeRTOS.org/ vPortFree() is automatically cleared to zero. - Add a new API pvPortCalloc to heap_2, heap_4 and heap_5 which has the same signature as the standard library calloc function. + - Update the pointer types to portPOINTER_SIZE_TYPE. Contributed by + @Octaviarius. + Add the ability to override send and receive completed callbacks for each instance of a stream buffer or message buffer. Earlier there could be one send and one receive callback for all instances of stream and message @@ -29,6 +56,10 @@ Documentation and download available at https://www.FreeRTOS.org/ sbSEND_COMPLETED() and sbRECEIVE_COMPLETED() macros are invoked. To maintain backwards compatibility, configUSE_SB_COMPLETED_CALLBACK defaults to 0. The functionality is currently not supported for MPU enabled ports. + + Generalize the FreeRTOS's Thread Local Storage (TLS) support so that it + is not tied to newlib and can be used with other c-runtime libraries also. + The default behavior for newlib support is kept same for backward + compatibility. + Add support to build and link FreeRTOS using CMake build system. Contributed by @yhsb2k. + Add support to generate Software Bill of Materials (SBOM) for every release. From 4c8f520cbce6cdbde04ffe76cb2a027582bc5481 Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Fri, 16 Sep 2022 22:15:49 +0530 Subject: [PATCH 096/164] Update History.txt as per the PR feedback Signed-off-by: Gaurav Aggarwal --- History.txt | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/History.txt b/History.txt index 56f679943e4..2e8ef7e8b97 100644 --- a/History.txt +++ b/History.txt @@ -2,28 +2,39 @@ Changes between FreeRTOS V10.4.6 and FreeRTOS V10.5.0 released September 16 2022 Documentation and download available at https://www.FreeRTOS.org/ - + ARMv7-M and ARMv8-M MPU ports: It is possible for a third party that + + ARMv7-M and ARMv8-M MPU ports: It was possible for a third party that already independently gained the ability to execute injected code to read from or write to arbitrary addresses by passing a negative argument as the xIndex parameter to pvTaskGetThreadLocalStoragePointer() or - vTaskSetThreadLocalStoragePointer respectively. + vTaskSetThreadLocalStoragePointer respectively. A check has been added to + ensure that passing a negative argument as the xIndex parameter does not + cause arbitrary read or write. We thank Certibit Consulting, LLC for reporting this issue. - + ARMv7-M and ARMv8-M MPU ports: It is possible for an unprivileged task to - invoke any function with privilege by passing it as a parameter to + + ARMv7-M and ARMv8-M MPU ports: It was possible for an unprivileged task + to invoke any function with privilege by passing it as a parameter to MPU_xTaskCreate, MPU_xTaskCreateStatic, MPU_xTimerCreate, - MPU_xTimerCreateStatic, or MPU_xTimerPendFunctionCall. - We thank Huazhong University of Science and Technology for reporting this issue. - + ARMv7-M and ARMv8-M MPU ports: It is possible for a third party that has + MPU_xTimerCreateStatic, or MPU_xTimerPendFunctionCall. MPU_xTaskCreate + and MPU_xTaskCreateStatic have been updated to only allow creation of + unprivileged tasks. MPU_xTimerCreate, MPU_xTimerCreateStatic and + MPU_xTimerPendFunctionCall APIs have been removed. + We thank Huazhong University of Science and Technology for reporting + this issue. 
+ + ARMv7-M and ARMv8-M MPU ports: It was possible for a third party that already independently gained the ability to execute injected code to achieve further privilege escalation by branching directly inside a FreeRTOS MPU API wrapper function with a manually crafted stack frame. + The local stack variable `xRunningPrivileged` has been removed so that + a manually crafted stack frame cannot be used for privilege escalation + by branching directly inside a FreeRTOS MPU API wrapper. We thank Certibit Consulting, LLC, Huazhong University of Science and Technology and the SecLab team at Northeastern University for reporting this issue. - + ARMv7-M MPU ports: It is possible to configure overlapping memory - protection unit (MPU) regions such that an unprivileged task can access - privileged data. - We thank the SecLab team at Northeastern University for reporting this issue. + + ARMv7-M MPU ports: It was possible to configure overlapping memory + protection unit (MPU) regions such that an unprivileged task could access + privileged data. The kernel now uses highest numbered MPU regions for + kernel protections to prevent such MPU configurations. + We thank the SecLab team at Northeastern University for reporting this + issue. + Add support for ARM Cortex-M55. + Add support for ARM Cortex-M85. Contributed by @gbrtth. + Add vectored mode interrupt support to the RISC-V port. From b595adf55f4fcd8c56d8ee491d965bcb9874dc6c Mon Sep 17 00:00:00 2001 From: Ming Yue Date: Tue, 20 Sep 2022 15:32:41 -0700 Subject: [PATCH 097/164] Update RISC-V IAR port to support vector mode. (#458) * Update RISC-V IAR port to support vector mode. * uncrustify Co-authored-by: David Chalco Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: alfred gedeon <28123637+alfred2g@users.noreply.github.com> --- portable/IAR/RISC-V/port.c | 264 ++++++++------- portable/IAR/RISC-V/portASM.s | 545 ++++++++++++++---------------- portable/IAR/RISC-V/portContext.h | 177 ++++++++++ portable/IAR/RISC-V/portmacro.h | 121 +++---- 4 files changed, 629 insertions(+), 478 deletions(-) create mode 100644 portable/IAR/RISC-V/portContext.h diff --git a/portable/IAR/RISC-V/port.c b/portable/IAR/RISC-V/port.c index f8a49ace9b5..1e8819da5e5 100644 --- a/portable/IAR/RISC-V/port.c +++ b/portable/IAR/RISC-V/port.c @@ -27,8 +27,8 @@ */ /*----------------------------------------------------------- - * Implementation of functions defined in portable.h for the RISC-V RV32 port. - *----------------------------------------------------------*/ +* Implementation of functions defined in portable.h for the RISC-V port. +*----------------------------------------------------------*/ /* Scheduler includes. */ #include "FreeRTOS.h" @@ -39,175 +39,203 @@ #include "string.h" #ifdef configCLINT_BASE_ADDRESS - #warning The configCLINT_BASE_ADDRESS constant has been deprecated. configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS are currently being derived from the (possibly 0) configCLINT_BASE_ADDRESS setting. Please update to define configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS dirctly in place of configCLINT_BASE_ADDRESS. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html + #warning The configCLINT_BASE_ADDRESS constant has been deprecated. configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS are currently being derived from the (possibly 0) configCLINT_BASE_ADDRESS setting. 
Please update to define configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS directly in place of configCLINT_BASE_ADDRESS. See https: /*www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html */ #endif #ifndef configMTIME_BASE_ADDRESS - #warning configMTIME_BASE_ADDRESS must be defined in FreeRTOSConfig.h. If the target chip includes a memory-mapped mtime register then set configMTIME_BASE_ADDRESS to the mapped address. Otherwise set configMTIME_BASE_ADDRESS to 0. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html + #warning configMTIME_BASE_ADDRESS must be defined in FreeRTOSConfig.h. If the target chip includes a memory-mapped mtime register then set configMTIME_BASE_ADDRESS to the mapped address. Otherwise set configMTIME_BASE_ADDRESS to 0. See https: /*www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html */ #endif #ifndef configMTIMECMP_BASE_ADDRESS - #warning configMTIMECMP_BASE_ADDRESS must be defined in FreeRTOSConfig.h. If the target chip includes a memory-mapped mtimecmp register then set configMTIMECMP_BASE_ADDRESS to the mapped address. Otherwise set configMTIMECMP_BASE_ADDRESS to 0. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html + #warning configMTIMECMP_BASE_ADDRESS must be defined in FreeRTOSConfig.h. If the target chip includes a memory-mapped mtimecmp register then set configMTIMECMP_BASE_ADDRESS to the mapped address. Otherwise set configMTIMECMP_BASE_ADDRESS to 0. See https: /*www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html */ #endif /* Let the user override the pre-loading of the initial LR with the address of -prvTaskExitError() in case it messes up unwinding of the stack in the -debugger. */ + * prvTaskExitError() in case it messes up unwinding of the stack in the + * debugger. */ #ifdef configTASK_RETURN_ADDRESS - #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS #else - #define portTASK_RETURN_ADDRESS prvTaskExitError + #define portTASK_RETURN_ADDRESS prvTaskExitError #endif /* The stack used by interrupt service routines. Set configISR_STACK_SIZE_WORDS -to use a statically allocated array as the interrupt stack. Alternative leave -configISR_STACK_SIZE_WORDS undefined and update the linker script so that a -linker variable names __freertos_irq_stack_top has the same value as the top -of the stack used by main. Using the linker script method will repurpose the -stack that was used by main before the scheduler was started for use as the -interrupt stack after the scheduler has started. */ + * to use a statically allocated array as the interrupt stack. Alternative leave + * configISR_STACK_SIZE_WORDS undefined and update the linker script so that a + * linker variable names __freertos_irq_stack_top has the same value as the top + * of the stack used by main. Using the linker script method will repurpose the + * stack that was used by main before the scheduler was started for use as the + * interrupt stack after the scheduler has started.
*/ #ifdef configISR_STACK_SIZE_WORDS - static __attribute__ ((aligned(16))) StackType_t xISRStack[ configISR_STACK_SIZE_WORDS ] = { 0 }; - const StackType_t xISRStackTop = ( StackType_t ) &( xISRStack[ configISR_STACK_SIZE_WORDS & ~portBYTE_ALIGNMENT_MASK ] ); +static __attribute__( ( aligned( 16 ) ) ) StackType_t xISRStack[ configISR_STACK_SIZE_WORDS ] = { 0 }; +const StackType_t xISRStackTop = ( StackType_t ) &( xISRStack[ configISR_STACK_SIZE_WORDS & ~portBYTE_ALIGNMENT_MASK ] ); - /* Don't use 0xa5 as the stack fill bytes as that is used by the kernerl for - the task stacks, and so will legitimately appear in many positions within - the ISR stack. */ - #define portISR_STACK_FILL_BYTE 0xee +/* Don't use 0xa5 as the stack fill bytes as that is used by the kernerl for + * the task stacks, and so will legitimately appear in many positions within + * the ISR stack. */ + #define portISR_STACK_FILL_BYTE 0xee #else - extern const uint32_t __freertos_irq_stack_top[]; - const StackType_t xISRStackTop = ( StackType_t ) __freertos_irq_stack_top; + extern const uint32_t __freertos_irq_stack_top[]; + const StackType_t xISRStackTop = ( StackType_t ) __freertos_irq_stack_top; #endif +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to * generate the tick interrupt. */ -void vPortSetupTimerInterrupt( void ) __attribute__(( weak )); +void vPortSetupTimerInterrupt( void ) __attribute__( ( weak ) ); /*-----------------------------------------------------------*/ /* Used to program the machine timer compare register. */ uint64_t ullNextTime = 0ULL; -const uint64_t *pullNextTime = &ullNextTime; +const uint64_t * pullNextTime = &ullNextTime; const size_t uxTimerIncrementsForOneTick = ( size_t ) ( ( configCPU_CLOCK_HZ ) / ( configTICK_RATE_HZ ) ); /* Assumes increment won't go over 32-bits. */ uint32_t const ullMachineTimerCompareRegisterBase = configMTIMECMP_BASE_ADDRESS; volatile uint64_t * pullMachineTimerCompareRegister = NULL; -/* Set configCHECK_FOR_STACK_OVERFLOW to 3 to add ISR stack checking to task -stack checking. A problem in the ISR stack will trigger an assert, not call the -stack overflow hook function (because the stack overflow hook is specific to a -task stack, not the ISR stack). */ -#if defined( configISR_STACK_SIZE_WORDS ) && ( configCHECK_FOR_STACK_OVERFLOW > 2 ) - #warning This path not tested, or even compiled yet. +/* Holds the critical nesting value - deliberately non-zero at start up to + * ensure interrupts are not accidentally enabled before the scheduler starts. */ +size_t xCriticalNesting = ( size_t ) 0xaaaaaaaa; +size_t * pxCriticalNesting = &xCriticalNesting; - static const uint8_t ucExpectedStackBytes[] = { - portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, \ - portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, \ - portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, \ - portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, \ - portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE }; \ +/* Used to catch tasks that attempt to return from their implementing function. 
*/ +size_t xTaskReturnAddress = ( size_t ) portTASK_RETURN_ADDRESS; - #define portCHECK_ISR_STACK() configASSERT( ( memcmp( ( void * ) xISRStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) == 0 ) ) -#else - /* Define the function away. */ - #define portCHECK_ISR_STACK() +/* Set configCHECK_FOR_STACK_OVERFLOW to 3 to add ISR stack checking to task + * stack checking. A problem in the ISR stack will trigger an assert, not call + * the stack overflow hook function (because the stack overflow hook is specific + * to a task stack, not the ISR stack). */ +#if defined( configISR_STACK_SIZE_WORDS ) && ( configCHECK_FOR_STACK_OVERFLOW > 2 ) + #warning This path not tested, or even compiled yet. + + static const uint8_t ucExpectedStackBytes[] = + { + portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, \ + portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, \ + portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, \ + portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, \ + portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE, portISR_STACK_FILL_BYTE + }; \ + + #define portCHECK_ISR_STACK() configASSERT( ( memcmp( ( void * ) xISRStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) == 0 ) ) +#else /* if defined( configISR_STACK_SIZE_WORDS ) && ( configCHECK_FOR_STACK_OVERFLOW > 2 ) */ + /* Define the function away. */ + #define portCHECK_ISR_STACK() #endif /* configCHECK_FOR_STACK_OVERFLOW > 2 */ /*-----------------------------------------------------------*/ -#if( configMTIME_BASE_ADDRESS != 0 ) && ( configMTIMECMP_BASE_ADDRESS != 0 ) +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( xCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configMTIME_BASE_ADDRESS != 0 ) && ( configMTIMECMP_BASE_ADDRESS != 0 ) + + void vPortSetupTimerInterrupt( void ) + { + uint32_t ulCurrentTimeHigh, ulCurrentTimeLow; + volatile uint32_t * const pulTimeHigh = ( uint32_t * ) ( ( configMTIME_BASE_ADDRESS ) + 4UL ); /* 8-byte type so high 32-bit word is 4 bytes up. */ + volatile uint32_t * const pulTimeLow = ( uint32_t * ) ( configMTIME_BASE_ADDRESS ); + volatile uint32_t ulHartId; - void vPortSetupTimerInterrupt( void ) - { - uint32_t ulCurrentTimeHigh, ulCurrentTimeLow; - volatile uint32_t * const pulTimeHigh = ( uint32_t * ) ( ( configMTIME_BASE_ADDRESS ) + 4UL ); /* 8-byte typer so high 32-bit word is 4 bytes up. 
*/ - volatile uint32_t * const pulTimeLow = ( uint32_t * ) ( configMTIME_BASE_ADDRESS ); - volatile uint32_t ulHartId; + __asm volatile ( "csrr %0, 0xf14" : "=r" ( ulHartId ) ); /* 0xf14 is hartid. */ - __asm volatile( "csrr %0, 0xf14" : "=r"( ulHartId ) ); /* 0xf14 is hartid. */ - pullMachineTimerCompareRegister = ( volatile uint64_t * ) ( ullMachineTimerCompareRegisterBase + ( ulHartId * sizeof( uint64_t ) ) ); + pullMachineTimerCompareRegister = ( volatile uint64_t * ) ( ullMachineTimerCompareRegisterBase + ( ulHartId * sizeof( uint64_t ) ) ); - do - { - ulCurrentTimeHigh = *pulTimeHigh; - ulCurrentTimeLow = *pulTimeLow; - } while( ulCurrentTimeHigh != *pulTimeHigh ); + do + { + ulCurrentTimeHigh = *pulTimeHigh; + ulCurrentTimeLow = *pulTimeLow; + } while( ulCurrentTimeHigh != *pulTimeHigh ); - ullNextTime = ( uint64_t ) ulCurrentTimeHigh; - ullNextTime <<= 32ULL; /* High 4-byte word is 32-bits up. */ - ullNextTime |= ( uint64_t ) ulCurrentTimeLow; - ullNextTime += ( uint64_t ) uxTimerIncrementsForOneTick; - *pullMachineTimerCompareRegister = ullNextTime; + ullNextTime = ( uint64_t ) ulCurrentTimeHigh; + ullNextTime <<= 32ULL; /* High 4-byte word is 32-bits up. */ + ullNextTime |= ( uint64_t ) ulCurrentTimeLow; + ullNextTime += ( uint64_t ) uxTimerIncrementsForOneTick; + *pullMachineTimerCompareRegister = ullNextTime; - /* Prepare the time to use after the next tick interrupt. */ - ullNextTime += ( uint64_t ) uxTimerIncrementsForOneTick; - } + /* Prepare the time to use after the next tick interrupt. */ + ullNextTime += ( uint64_t ) uxTimerIncrementsForOneTick; + } #endif /* ( configMTIME_BASE_ADDRESS != 0 ) && ( configMTIME_BASE_ADDRESS != 0 ) */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) { -extern void xPortStartFirstTask( void ); - - #if( configASSERT_DEFINED == 1 ) - { - volatile uint32_t mtvec = 0; - - /* Check the least significant two bits of mtvec are 00 - indicating - single vector mode. */ - __asm volatile( "csrr %0, 0x305" : "=r"( mtvec ) ); /* 0x305 is mtvec. */ - configASSERT( ( mtvec & 0x03UL ) == 0 ); - - /* Check alignment of the interrupt stack - which is the same as the - stack that was being used by main() prior to the scheduler being - started. */ - configASSERT( ( xISRStackTop & portBYTE_ALIGNMENT_MASK ) == 0 ); - - #ifdef configISR_STACK_SIZE_WORDS - { - memset( ( void * ) xISRStack, portISR_STACK_FILL_BYTE, sizeof( xISRStack ) ); - } - #endif /* configISR_STACK_SIZE_WORDS */ - } - #endif /* configASSERT_DEFINED */ - - /* If there is a CLINT then it is ok to use the default implementation - in this file, otherwise vPortSetupTimerInterrupt() must be implemented to - configure whichever clock is to be used to generate the tick interrupt. */ - vPortSetupTimerInterrupt(); - - #if( ( configMTIME_BASE_ADDRESS != 0 ) && ( configMTIMECMP_BASE_ADDRESS != 0 ) ) - { - /* Enable mtime and external interrupts. 1<<7 for timer interrupt, 1<<11 - for external interrupt. _RB_ What happens here when mtime is not present as - with pulpino? */ - __asm volatile( "csrs 0x304, %0" :: "r"(0x880) ); /* 0x304 is mie. */ - } - #else - { - /* Enable external interrupts. */ - __asm volatile( "csrs 0x304, %0" :: "r"(0x800) ); /* 304 is mie. */ - } - #endif /* ( configMTIME_BASE_ADDRESS != 0 ) && ( configMTIMECMP_BASE_ADDRESS != 0 ) */ - - xPortStartFirstTask(); - - /* Should not get here as after calling xPortStartFirstTask() only tasks - should be executing. 
*/ - return pdFAIL; + extern void xPortStartFirstTask( void ); + + #if ( configASSERT_DEFINED == 1 ) + { + /* Check alignment of the interrupt stack - which is the same as the + * stack that was being used by main() prior to the scheduler being + * started. */ + configASSERT( ( xISRStackTop & portBYTE_ALIGNMENT_MASK ) == 0 ); + + #ifdef configISR_STACK_SIZE_WORDS + { + memset( ( void * ) xISRStack, portISR_STACK_FILL_BYTE, sizeof( xISRStack ) ); + } + #endif /* configISR_STACK_SIZE_WORDS */ + } + #endif /* configASSERT_DEFINED */ + + /* If there is a CLINT then it is ok to use the default implementation + * in this file, otherwise vPortSetupTimerInterrupt() must be implemented to + * configure whichever clock is to be used to generate the tick interrupt. */ + vPortSetupTimerInterrupt(); + + #if ( ( configMTIME_BASE_ADDRESS != 0 ) && ( configMTIMECMP_BASE_ADDRESS != 0 ) ) + { + /* Enable mtime and external interrupts. 1<<7 for timer interrupt, + * 1<<11 for external interrupt. _RB_ What happens here when mtime is + * not present as with pulpino? */ + __asm volatile ( "csrs 0x304, %0" ::"r" ( 0x880 ) ); /* 0x304 is mie. */ + } + #endif /* ( configMTIME_BASE_ADDRESS != 0 ) && ( configMTIMECMP_BASE_ADDRESS != 0 ) */ + + xPortStartFirstTask(); + + /* Should not get here as after calling xPortStartFirstTask() only tasks + * should be executing. */ + return pdFAIL; } /*-----------------------------------------------------------*/ void vPortEndScheduler( void ) { - /* Not implemented. */ - for( ;; ); + /* Not implemented. */ + for( ; ; ) + { + } } - - - - - +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/RISC-V/portASM.s b/portable/IAR/RISC-V/portASM.s index b529e9819e7..136464447ed 100644 --- a/portable/IAR/RISC-V/portASM.s +++ b/portable/IAR/RISC-V/portASM.s @@ -56,43 +56,27 @@ * registers. * */ -#if __riscv_xlen == 64 - #define portWORD_SIZE 8 - #define store_x sd - #define load_x ld -#elif __riscv_xlen == 32 - #define store_x sw - #define load_x lw - #define portWORD_SIZE 4 -#else - #error Assembler did not define __riscv_xlen -#endif -#include "freertos_risc_v_chip_specific_extensions.h" +#include "portContext.h" /* Check the freertos_risc_v_chip_specific_extensions.h and/or command line definitions. */ #if defined( portasmHAS_CLINT ) && defined( portasmHAS_MTIME ) - #error The portasmHAS_CLINT constant has been deprecated. Please replace it with portasmHAS_MTIME. portasmHAS_CLINT and portasmHAS_MTIME cannot both be defined at once. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html + #error The portasmHAS_CLINT constant has been deprecated. Please replace it with portasmHAS_MTIME. portasmHAS_CLINT and portasmHAS_MTIME cannot both be defined at once. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html #endif #ifdef portasmHAS_CLINT - #warning The portasmHAS_CLINT constant has been deprecated. Please replace it with portasmHAS_MTIME and portasmHAS_SIFIVE_CLINT. For now portasmHAS_MTIME and portasmHAS_SIFIVE_CLINT are derived from portasmHAS_CLINT. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html - #define portasmHAS_MTIME portasmHAS_CLINT - #define portasmHAS_SIFIVE_CLINT portasmHAS_CLINT + #warning The portasmHAS_CLINT constant has been deprecated. Please replace it with portasmHAS_MTIME and portasmHAS_SIFIVE_CLINT. For now portasmHAS_MTIME and portasmHAS_SIFIVE_CLINT are derived from portasmHAS_CLINT. 
See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html + #define portasmHAS_MTIME portasmHAS_CLINT + #define portasmHAS_SIFIVE_CLINT portasmHAS_CLINT #endif #ifndef portasmHAS_MTIME - #error freertos_risc_v_chip_specific_extensions.h must define portasmHAS_MTIME to either 1 (MTIME clock present) or 0 (MTIME clock not present). See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html -#endif - -#ifndef portasmHANDLE_INTERRUPT - #error portasmHANDLE_INTERRUPT must be defined to the function to be called to handle external/peripheral interrupts. portasmHANDLE_INTERRUPT can be defined on the assembler command line or in the appropriate freertos_risc_v_chip_specific_extensions.h header file. https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html + #error freertos_risc_v_chip_specific_extensions.h must define portasmHAS_MTIME to either 1 (MTIME clock present) or 0 (MTIME clock not present). See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html #endif - #ifndef portasmHAS_SIFIVE_CLINT - #define portasmHAS_SIFIVE_CLINT 0 + #define portasmHAS_SIFIVE_CLINT 0 #endif /* CSR definitions. */ @@ -101,260 +85,60 @@ definitions. */ #define CSR_MEPC 0x341 #define CSR_MCAUSE 0x342 - -/* Only the standard core registers are stored by default. Any additional -registers must be saved by the portasmSAVE_ADDITIONAL_REGISTERS and -portasmRESTORE_ADDITIONAL_REGISTERS macros - which can be defined in a chip -specific version of freertos_risc_v_chip_specific_extensions.h. See the notes -at the top of this file. */ -#define portCONTEXT_SIZE ( 30 * portWORD_SIZE ) - PUBLIC xPortStartFirstTask - PUBLIC freertos_risc_v_trap_handler PUBLIC pxPortInitialiseStack - EXTERN pxCurrentTCB - EXTERN ulPortTrapHandler + PUBLIC freertos_risc_v_trap_handler + PUBLIC freertos_risc_v_exception_handler + PUBLIC freertos_risc_v_interrupt_handler + PUBLIC freertos_risc_v_mtimer_interrupt_handler + EXTERN vTaskSwitchContext EXTERN xTaskIncrementTick - EXTERN Timer_IRQHandler EXTERN pullMachineTimerCompareRegister EXTERN pullNextTime EXTERN uxTimerIncrementsForOneTick /* size_t type so 32-bit on 32-bit core and 64-bits on 64-bit core. 
*/ - EXTERN xISRStackTop - EXTERN portasmHANDLE_INTERRUPT + EXTERN xTaskReturnAddress + PUBWEAK freertos_risc_v_application_exception_handler + PUBWEAK freertos_risc_v_application_interrupt_handler /*-----------------------------------------------------------*/ SECTION `.text`:CODE:NOROOT(2) CODE -freertos_risc_v_trap_handler: - addi sp, sp, -portCONTEXT_SIZE - store_x x1, 1 * portWORD_SIZE( sp ) - store_x x5, 2 * portWORD_SIZE( sp ) - store_x x6, 3 * portWORD_SIZE( sp ) - store_x x7, 4 * portWORD_SIZE( sp ) - store_x x8, 5 * portWORD_SIZE( sp ) - store_x x9, 6 * portWORD_SIZE( sp ) - store_x x10, 7 * portWORD_SIZE( sp ) - store_x x11, 8 * portWORD_SIZE( sp ) - store_x x12, 9 * portWORD_SIZE( sp ) - store_x x13, 10 * portWORD_SIZE( sp ) - store_x x14, 11 * portWORD_SIZE( sp ) - store_x x15, 12 * portWORD_SIZE( sp ) - store_x x16, 13 * portWORD_SIZE( sp ) - store_x x17, 14 * portWORD_SIZE( sp ) - store_x x18, 15 * portWORD_SIZE( sp ) - store_x x19, 16 * portWORD_SIZE( sp ) - store_x x20, 17 * portWORD_SIZE( sp ) - store_x x21, 18 * portWORD_SIZE( sp ) - store_x x22, 19 * portWORD_SIZE( sp ) - store_x x23, 20 * portWORD_SIZE( sp ) - store_x x24, 21 * portWORD_SIZE( sp ) - store_x x25, 22 * portWORD_SIZE( sp ) - store_x x26, 23 * portWORD_SIZE( sp ) - store_x x27, 24 * portWORD_SIZE( sp ) - store_x x28, 25 * portWORD_SIZE( sp ) - store_x x29, 26 * portWORD_SIZE( sp ) - store_x x30, 27 * portWORD_SIZE( sp ) - store_x x31, 28 * portWORD_SIZE( sp ) - - csrr t0, CSR_MSTATUS /* Required for MPIE bit. */ - store_x t0, 29 * portWORD_SIZE( sp ) - - portasmSAVE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to save any registers unique to the RISC-V implementation. */ - - load_x t0, pxCurrentTCB /* Load pxCurrentTCB. */ - store_x sp, 0( t0 ) /* Write sp to first TCB member. */ - - csrr a0, CSR_MCAUSE - csrr a1, CSR_MEPC - -test_if_asynchronous: - srli a2, a0, __riscv_xlen - 1 /* MSB of mcause is 1 if handing an asynchronous interrupt - shift to LSB to clear other bits. */ - beq a2, x0, handle_synchronous /* Branch past interrupt handing if not asynchronous. */ - store_x a1, 0( sp ) /* Asynch so save unmodified exception return address. */ - -handle_asynchronous: - -#if( portasmHAS_MTIME != 0 ) - - test_if_mtimer: /* If there is a CLINT then the mtimer is used to generate the tick interrupt. */ - - addi t0, x0, 1 - - slli t0, t0, __riscv_xlen - 1 /* LSB is already set, shift into MSB. Shift 31 on 32-bit or 63 on 64-bit cores. */ - addi t1, t0, 7 /* 0x8000[]0007 == machine timer interrupt. */ - bne a0, t1, test_if_external_interrupt - - load_x t0, pullMachineTimerCompareRegister /* Load address of compare register into t0. */ - load_x t1, pullNextTime /* Load the address of ullNextTime into t1. */ - - #if( __riscv_xlen == 32 ) - - /* Update the 64-bit mtimer compare match value in two 32-bit writes. */ - li t4, -1 - lw t2, 0(t1) /* Load the low word of ullNextTime into t2. */ - lw t3, 4(t1) /* Load the high word of ullNextTime into t3. */ - sw t4, 0(t0) /* Low word no smaller than old value to start with - will be overwritten below. */ - sw t3, 4(t0) /* Store high word of ullNextTime into compare register. No smaller than new value. */ - sw t2, 0(t0) /* Store low word of ullNextTime into compare register. */ - lw t0, uxTimerIncrementsForOneTick /* Load the value of ullTimerIncrementForOneTick into t0 (could this be optimized by storing in an array next to pullNextTime?). 
*/ - add t4, t0, t2 /* Add the low word of ullNextTime to the timer increments for one tick (assumes timer increment for one tick fits in 32-bits). */ - sltu t5, t4, t2 /* See if the sum of low words overflowed (what about the zero case?). */ - add t6, t3, t5 /* Add overflow to high word of ullNextTime. */ - sw t4, 0(t1) /* Store new low word of ullNextTime. */ - sw t6, 4(t1) /* Store new high word of ullNextTime. */ - - #endif /* __riscv_xlen == 32 */ - - #if( __riscv_xlen == 64 ) - - /* Update the 64-bit mtimer compare match value. */ - ld t2, 0(t1) /* Load ullNextTime into t2. */ - sd t2, 0(t0) /* Store ullNextTime into compare register. */ - ld t0, uxTimerIncrementsForOneTick /* Load the value of ullTimerIncrementForOneTick into t0 (could this be optimized by storing in an array next to pullNextTime?). */ - add t4, t0, t2 /* Add ullNextTime to the timer increments for one tick. */ - sd t4, 0(t1) /* Store ullNextTime. */ - - #endif /* __riscv_xlen == 64 */ - - load_x sp, xISRStackTop /* Switch to ISR stack before function call. */ - jal xTaskIncrementTick - beqz a0, processed_source /* Don't switch context if incrementing tick didn't unblock a task. */ - jal vTaskSwitchContext - j processed_source - - test_if_external_interrupt: /* If there is a CLINT and the mtimer interrupt is not pending then check to see if an external interrupt is pending. */ - addi t1, t1, 4 /* 0x80000007 + 4 = 0x8000000b == Machine external interrupt. */ - bne a0, t1, as_yet_unhandled /* Something as yet unhandled. */ - -#endif /* portasmHAS_MTIME */ - - load_x sp, xISRStackTop /* Switch to ISR stack before function call. */ - jal portasmHANDLE_INTERRUPT /* Jump to the interrupt handler if there is no CLINT or if there is a CLINT and it has been determined that an external interrupt is pending. */ - j processed_source - -handle_synchronous: - addi a1, a1, 4 /* Synchronous so updated exception return address to the instruction after the instruction that generated the exeption. */ - store_x a1, 0( sp ) /* Save updated exception return address. */ - -test_if_environment_call: - li t0, 11 /* 11 == environment call. */ - bne a0, t0, is_exception /* Not an M environment call, so some other exception. */ - load_x sp, xISRStackTop /* Switch to ISR stack before function call. */ - jal vTaskSwitchContext - j processed_source - -is_exception: - csrr t0, CSR_MCAUSE /* For viewing in the debugger only. */ - csrr t1, CSR_MEPC /* For viewing in the debugger only */ - csrr t2, CSR_MSTATUS - j is_exception /* No other exceptions handled yet. */ - -as_yet_unhandled: - csrr t0, mcause /* For viewing in the debugger only. */ - j as_yet_unhandled - -processed_source: - load_x t1, pxCurrentTCB /* Load pxCurrentTCB. */ - load_x sp, 0( t1 ) /* Read sp from first TCB member. */ - - /* Load mret with the address of the next instruction in the task to run next. */ - load_x t0, 0( sp ) - csrw CSR_MEPC, t0 - - portasmRESTORE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to restore any registers unique to the RISC-V implementation. */ - - /* Load mstatus with the interrupt enable bits used by the task. */ - load_x t0, 29 * portWORD_SIZE( sp ) - csrw CSR_MSTATUS, t0 /* Required for MPIE bit. 
*/ - - load_x x1, 1 * portWORD_SIZE( sp ) - load_x x5, 2 * portWORD_SIZE( sp ) /* t0 */ - load_x x6, 3 * portWORD_SIZE( sp ) /* t1 */ - load_x x7, 4 * portWORD_SIZE( sp ) /* t2 */ - load_x x8, 5 * portWORD_SIZE( sp ) /* s0/fp */ - load_x x9, 6 * portWORD_SIZE( sp ) /* s1 */ - load_x x10, 7 * portWORD_SIZE( sp ) /* a0 */ - load_x x11, 8 * portWORD_SIZE( sp ) /* a1 */ - load_x x12, 9 * portWORD_SIZE( sp ) /* a2 */ - load_x x13, 10 * portWORD_SIZE( sp ) /* a3 */ - load_x x14, 11 * portWORD_SIZE( sp ) /* a4 */ - load_x x15, 12 * portWORD_SIZE( sp ) /* a5 */ - load_x x16, 13 * portWORD_SIZE( sp ) /* a6 */ - load_x x17, 14 * portWORD_SIZE( sp ) /* a7 */ - load_x x18, 15 * portWORD_SIZE( sp ) /* s2 */ - load_x x19, 16 * portWORD_SIZE( sp ) /* s3 */ - load_x x20, 17 * portWORD_SIZE( sp ) /* s4 */ - load_x x21, 18 * portWORD_SIZE( sp ) /* s5 */ - load_x x22, 19 * portWORD_SIZE( sp ) /* s6 */ - load_x x23, 20 * portWORD_SIZE( sp ) /* s7 */ - load_x x24, 21 * portWORD_SIZE( sp ) /* s8 */ - load_x x25, 22 * portWORD_SIZE( sp ) /* s9 */ - load_x x26, 23 * portWORD_SIZE( sp ) /* s10 */ - load_x x27, 24 * portWORD_SIZE( sp ) /* s11 */ - load_x x28, 25 * portWORD_SIZE( sp ) /* t3 */ - load_x x29, 26 * portWORD_SIZE( sp ) /* t4 */ - load_x x30, 27 * portWORD_SIZE( sp ) /* t5 */ - load_x x31, 28 * portWORD_SIZE( sp ) /* t6 */ - addi sp, sp, portCONTEXT_SIZE - - mret - -/*-----------------------------------------------------------*/ - -xPortStartFirstTask: - -#if( portasmHAS_SIFIVE_CLINT != 0 ) - /* If there is a clint then interrupts can branch directly to the FreeRTOS - trap handler. Otherwise the interrupt controller will need to be configured - outside of this file. */ - la t0, freertos_risc_v_trap_handler - csrw CSR_MTVEC, t0 -#endif /* portasmHAS_CLILNT */ - - load_x sp, pxCurrentTCB /* Load pxCurrentTCB. */ - load_x sp, 0( sp ) /* Read sp from first TCB member. */ - - load_x x1, 0( sp ) /* Note for starting the scheduler the exception return address is used as the function return address. */ - - portasmRESTORE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to restore any registers unique to the RISC-V implementation. 
*/ - - load_x x6, 3 * portWORD_SIZE( sp ) /* t1 */ - load_x x7, 4 * portWORD_SIZE( sp ) /* t2 */ - load_x x8, 5 * portWORD_SIZE( sp ) /* s0/fp */ - load_x x9, 6 * portWORD_SIZE( sp ) /* s1 */ - load_x x10, 7 * portWORD_SIZE( sp ) /* a0 */ - load_x x11, 8 * portWORD_SIZE( sp ) /* a1 */ - load_x x12, 9 * portWORD_SIZE( sp ) /* a2 */ - load_x x13, 10 * portWORD_SIZE( sp ) /* a3 */ - load_x x14, 11 * portWORD_SIZE( sp ) /* a4 */ - load_x x15, 12 * portWORD_SIZE( sp ) /* a5 */ - load_x x16, 13 * portWORD_SIZE( sp ) /* a6 */ - load_x x17, 14 * portWORD_SIZE( sp ) /* a7 */ - load_x x18, 15 * portWORD_SIZE( sp ) /* s2 */ - load_x x19, 16 * portWORD_SIZE( sp ) /* s3 */ - load_x x20, 17 * portWORD_SIZE( sp ) /* s4 */ - load_x x21, 18 * portWORD_SIZE( sp ) /* s5 */ - load_x x22, 19 * portWORD_SIZE( sp ) /* s6 */ - load_x x23, 20 * portWORD_SIZE( sp ) /* s7 */ - load_x x24, 21 * portWORD_SIZE( sp ) /* s8 */ - load_x x25, 22 * portWORD_SIZE( sp ) /* s9 */ - load_x x26, 23 * portWORD_SIZE( sp ) /* s10 */ - load_x x27, 24 * portWORD_SIZE( sp ) /* s11 */ - load_x x28, 25 * portWORD_SIZE( sp ) /* t3 */ - load_x x29, 26 * portWORD_SIZE( sp ) /* t4 */ - load_x x30, 27 * portWORD_SIZE( sp ) /* t5 */ - load_x x31, 28 * portWORD_SIZE( sp ) /* t6 */ - - load_x x5, 29 * portWORD_SIZE( sp ) /* Initial mstatus into x5 (t0) */ - addi x5, x5, 0x08 /* Set MIE bit so the first task starts with interrupts enabled - required as returns with ret not eret. */ - csrrw x0, CSR_MSTATUS, x5 /* Interrupts enabled from here! */ - load_x x5, 2 * portWORD_SIZE( sp ) /* Initial x5 (t0) value. */ - addi sp, sp, portCONTEXT_SIZE - ret - +portUPDATE_MTIMER_COMPARE_REGISTER MACRO + load_x t0, pullMachineTimerCompareRegister /* Load address of compare register into t0. */ + load_x t1, pullNextTime /* Load the address of ullNextTime into t1. */ + + #if( __riscv_xlen == 32 ) + + /* Update the 64-bit mtimer compare match value in two 32-bit writes. */ + li t4, -1 + lw t2, 0(t1) /* Load the low word of ullNextTime into t2. */ + lw t3, 4(t1) /* Load the high word of ullNextTime into t3. */ + sw t4, 0(t0) /* Low word no smaller than old value to start with - will be overwritten below. */ + sw t3, 4(t0) /* Store high word of ullNextTime into compare register. No smaller than new value. */ + sw t2, 0(t0) /* Store low word of ullNextTime into compare register. */ + lw t0, uxTimerIncrementsForOneTick /* Load the value of ullTimerIncrementForOneTick into t0 (could this be optimized by storing in an array next to pullNextTime?). */ + add t4, t0, t2 /* Add the low word of ullNextTime to the timer increments for one tick (assumes timer increment for one tick fits in 32-bits). */ + sltu t5, t4, t2 /* See if the sum of low words overflowed (what about the zero case?). */ + add t6, t3, t5 /* Add overflow to high word of ullNextTime. */ + sw t4, 0(t1) /* Store new low word of ullNextTime. */ + sw t6, 4(t1) /* Store new high word of ullNextTime. */ + + #endif /* __riscv_xlen == 32 */ + + #if( __riscv_xlen == 64 ) + + /* Update the 64-bit mtimer compare match value. */ + ld t2, 0(t1) /* Load ullNextTime into t2. */ + sd t2, 0(t0) /* Store ullNextTime into compare register. */ + ld t0, uxTimerIncrementsForOneTick /* Load the value of ullTimerIncrementForOneTick into t0 (could this be optimized by storing in an array next to pullNextTime?). */ + add t4, t0, t2 /* Add ullNextTime to the timer increments for one tick. */ + sd t4, 0(t1) /* Store ullNextTime. 
*/ + + #endif /* __riscv_xlen == 64 */ + ENDM /*-----------------------------------------------------------*/ /* @@ -369,25 +153,26 @@ xPortStartFirstTask: * RISC-V maps registers to ABI names as follows (X1 to X31 integer registers * for the 'I' profile, X1 to X15 for the 'E' profile, currently I assumed). * - * Register ABI Name Description Saver - * x0 zero Hard-wired zero - - * x1 ra Return address Caller - * x2 sp Stack pointer Callee - * x3 gp Global pointer - - * x4 tp Thread pointer - - * x5-7 t0-2 Temporaries Caller - * x8 s0/fp Saved register/Frame pointer Callee - * x9 s1 Saved register Callee - * x10-11 a0-1 Function Arguments/return values Caller - * x12-17 a2-7 Function arguments Caller - * x18-27 s2-11 Saved registers Callee - * x28-31 t3-6 Temporaries Caller + * Register ABI Name Description Saver + * x0 zero Hard-wired zero - + * x1 ra Return address Caller + * x2 sp Stack pointer Callee + * x3 gp Global pointer - + * x4 tp Thread pointer - + * x5-7 t0-2 Temporaries Caller + * x8 s0/fp Saved register/Frame pointer Callee + * x9 s1 Saved register Callee + * x10-11 a0-1 Function Arguments/return values Caller + * x12-17 a2-7 Function arguments Caller + * x18-27 s2-11 Saved registers Callee + * x28-31 t3-6 Temporaries Caller * * The RISC-V context is saved t FreeRTOS tasks in the following stack frame, * where the global and thread pointers are currently assumed to be constant so * are not saved: * * mstatus + * xCriticalNesting * x31 * x30 * x29 @@ -420,29 +205,189 @@ xPortStartFirstTask: * pxCode */ pxPortInitialiseStack: - - csrr t0, CSR_MSTATUS /* Obtain current mstatus value. */ - andi t0, t0, ~0x8 /* Ensure interrupts are disabled when the stack is restored within an ISR. Required when a task is created after the schedulre has been started, otherwise interrupts would be disabled anyway. */ - addi t1, x0, 0x188 /* Generate the value 0x1880, which are the MPIE and MPP bits to set in mstatus. */ - slli t1, t1, 4 - or t0, t0, t1 /* Set MPIE and MPP bits in mstatus value. */ - - addi a0, a0, -portWORD_SIZE - store_x t0, 0(a0) /* mstatus onto the stack. */ - addi a0, a0, -(22 * portWORD_SIZE) /* Space for registers x11-x31. */ - store_x a2, 0(a0) /* Task parameters (pvParameters parameter) goes into register X10/a0 on the stack. */ - addi a0, a0, -(6 * portWORD_SIZE) /* Space for registers x5-x9. */ - store_x x0, 0(a0) /* Return address onto the stack, could be portTASK_RETURN_ADDRESS */ - addi t0, x0, portasmADDITIONAL_CONTEXT_SIZE /* The number of chip specific additional registers. */ -chip_specific_stack_frame: /* First add any chip specific registers to the stack frame being created. */ - beq t0, x0, no_more_regs /* No more chip specific registers to save. */ - addi a0, a0, -portWORD_SIZE /* Make space for chip specific register. */ - store_x x0, 0(a0) /* Give the chip specific register an initial value of zero. */ - addi t0, t0, -1 /* Decrement the count of chip specific registers remaining. */ - j chip_specific_stack_frame /* Until no more chip specific registers. */ + csrr t0, CSR_MSTATUS /* Obtain current mstatus value. */ + andi t0, t0, ~0x8 /* Ensure interrupts are disabled when the stack is restored within an ISR. Required when a task is created after the schedulre has been started, otherwise interrupts would be disabled anyway. */ + addi t1, x0, 0x188 /* Generate the value 0x1880, which are the MPIE and MPP bits to set in mstatus. */ + slli t1, t1, 4 + or t0, t0, t1 /* Set MPIE and MPP bits in mstatus value. 
*/ + + addi a0, a0, -portWORD_SIZE + store_x t0, 0(a0) /* mstatus onto the stack. */ + addi a0, a0, -portWORD_SIZE /* Space for critical nesting count. */ + store_x x0, 0(a0) /* Critical nesting count starts at 0 for every task. */ + addi a0, a0, -(22 * portWORD_SIZE) /* Space for registers x11-x31. */ + store_x a2, 0(a0) /* Task parameters (pvParameters parameter) goes into register X10/a0 on the stack. */ + addi a0, a0, -(6 * portWORD_SIZE) /* Space for registers x5-x9. */ + load_x t0, xTaskReturnAddress + store_x t0, 0(a0) /* Return address onto the stack. */ + addi t0, x0, portasmADDITIONAL_CONTEXT_SIZE /* The number of chip specific additional registers. */ +chip_specific_stack_frame: /* First add any chip specific registers to the stack frame being created. */ + beq t0, x0, no_more_regs /* No more chip specific registers to save. */ + addi a0, a0, -portWORD_SIZE /* Make space for chip specific register. */ + store_x x0, 0(a0) /* Give the chip specific register an initial value of zero. */ + addi t0, t0, -1 /* Decrement the count of chip specific registers remaining. */ + j chip_specific_stack_frame /* Until no more chip specific registers. */ no_more_regs: - addi a0, a0, -portWORD_SIZE - store_x a1, 0(a0) /* mret value (pxCode parameter) onto the stack. */ - ret + addi a0, a0, -portWORD_SIZE + store_x a1, 0(a0) /* mret value (pxCode parameter) onto the stack. */ + ret +/*-----------------------------------------------------------*/ + +xPortStartFirstTask: + load_x sp, pxCurrentTCB /* Load pxCurrentTCB. */ + load_x sp, 0( sp ) /* Read sp from first TCB member. */ + + load_x x1, 0( sp ) /* Note for starting the scheduler the exception return address is used as the function return address. */ + + portasmRESTORE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to restore any registers unique to the RISC-V implementation. */ + + load_x x7, 4 * portWORD_SIZE( sp ) /* t2 */ + load_x x8, 5 * portWORD_SIZE( sp ) /* s0/fp */ + load_x x9, 6 * portWORD_SIZE( sp ) /* s1 */ + load_x x10, 7 * portWORD_SIZE( sp ) /* a0 */ + load_x x11, 8 * portWORD_SIZE( sp ) /* a1 */ + load_x x12, 9 * portWORD_SIZE( sp ) /* a2 */ + load_x x13, 10 * portWORD_SIZE( sp ) /* a3 */ + load_x x14, 11 * portWORD_SIZE( sp ) /* a4 */ + load_x x15, 12 * portWORD_SIZE( sp ) /* a5 */ + load_x x16, 13 * portWORD_SIZE( sp ) /* a6 */ + load_x x17, 14 * portWORD_SIZE( sp ) /* a7 */ + load_x x18, 15 * portWORD_SIZE( sp ) /* s2 */ + load_x x19, 16 * portWORD_SIZE( sp ) /* s3 */ + load_x x20, 17 * portWORD_SIZE( sp ) /* s4 */ + load_x x21, 18 * portWORD_SIZE( sp ) /* s5 */ + load_x x22, 19 * portWORD_SIZE( sp ) /* s6 */ + load_x x23, 20 * portWORD_SIZE( sp ) /* s7 */ + load_x x24, 21 * portWORD_SIZE( sp ) /* s8 */ + load_x x25, 22 * portWORD_SIZE( sp ) /* s9 */ + load_x x26, 23 * portWORD_SIZE( sp ) /* s10 */ + load_x x27, 24 * portWORD_SIZE( sp ) /* s11 */ + load_x x28, 25 * portWORD_SIZE( sp ) /* t3 */ + load_x x29, 26 * portWORD_SIZE( sp ) /* t4 */ + load_x x30, 27 * portWORD_SIZE( sp ) /* t5 */ + load_x x31, 28 * portWORD_SIZE( sp ) /* t6 */ + + load_x x5, 29 * portWORD_SIZE( sp ) /* Obtain xCriticalNesting value for this task from task's stack. */ + load_x x6, pxCriticalNesting /* Load the address of xCriticalNesting into x6. */ + store_x x5, 0( x6 ) /* Restore the critical nesting value for this task. */ + + load_x x5, 30 * portWORD_SIZE( sp ) /* Initial mstatus into x5 (t0). 
*/ + addi x5, x5, 0x08 /* Set MIE bit so the first task starts with interrupts enabled - required as returns with ret not eret. */ + csrrw x0, CSR_MSTATUS, x5 /* Interrupts enabled from here! */ + + load_x x5, 2 * portWORD_SIZE( sp ) /* Initial x5 (t0) value. */ + load_x x6, 3 * portWORD_SIZE( sp ) /* Initial x6 (t1) value. */ + + addi sp, sp, portCONTEXT_SIZE + ret +/*-----------------------------------------------------------*/ + +freertos_risc_v_application_exception_handler: + csrr t0, CSR_MCAUSE /* For viewing in the debugger only. */ + csrr t1, CSR_MEPC /* For viewing in the debugger only */ + csrr t2, CSR_MSTATUS /* For viewing in the debugger only */ + j $ +/*-----------------------------------------------------------*/ + +freertos_risc_v_application_interrupt_handler: + csrr t0, CSR_MCAUSE /* For viewing in the debugger only. */ + csrr t1, CSR_MEPC /* For viewing in the debugger only */ + csrr t2, CSR_MSTATUS /* For viewing in the debugger only */ + j $ +/*-----------------------------------------------------------*/ + + SECTION `.text.freertos_risc_v_exception_handler`:CODE:NOROOT(2) + CODE + +freertos_risc_v_exception_handler: + portcontextSAVE_EXCEPTION_CONTEXT + /* a0 now contains mcause. */ + li t0, 11 /* 11 == environment call. */ + bne a0, t0, other_exception /* Not an M environment call, so some other exception. */ + call vTaskSwitchContext + portcontextRESTORE_CONTEXT + +other_exception: + call freertos_risc_v_application_exception_handler + portcontextRESTORE_CONTEXT +/*-----------------------------------------------------------*/ + + SECTION `.text.freertos_risc_v_interrupt_handler`:CODE:NOROOT(2) + CODE + +freertos_risc_v_interrupt_handler: + portcontextSAVE_INTERRUPT_CONTEXT + call freertos_risc_v_application_interrupt_handler + portcontextRESTORE_CONTEXT +/*-----------------------------------------------------------*/ + + SECTION `.text.freertos_risc_v_mtimer_interrupt_handler`:CODE:NOROOT(2) + CODE + +freertos_risc_v_mtimer_interrupt_handler: + portcontextSAVE_INTERRUPT_CONTEXT + portUPDATE_MTIMER_COMPARE_REGISTER + call xTaskIncrementTick + beqz a0, exit_without_context_switch /* Don't switch context if incrementing tick didn't unblock a task. */ + call vTaskSwitchContext +exit_without_context_switch: + portcontextRESTORE_CONTEXT +/*-----------------------------------------------------------*/ + + SECTION `.text.freertos_risc_v_trap_handler`:CODE:NOROOT(8) + CODE + +freertos_risc_v_trap_handler: + portcontextSAVE_CONTEXT_INTERNAL + + csrr a0, CSR_MCAUSE + csrr a1, CSR_MEPC + + bge a0, x0, synchronous_exception + +asynchronous_interrupt: + store_x a1, 0( sp ) /* Asynchronous interrupt so save unmodified exception return address. */ + load_x sp, xISRStackTop /* Switch to ISR stack. */ + j handle_interrupt + +synchronous_exception: + addi a1, a1, 4 /* Synchronous so update exception return address to the instruction after the instruction that generated the exeption. */ + store_x a1, 0( sp ) /* Save updated exception return address. */ + load_x sp, xISRStackTop /* Switch to ISR stack. */ + j handle_exception + +handle_interrupt: +#if( portasmHAS_MTIME != 0 ) + + test_if_mtimer: /* If there is a CLINT then the mtimer is used to generate the tick interrupt. */ + addi t0, x0, 1 + slli t0, t0, __riscv_xlen - 1 /* LSB is already set, shift into MSB. Shift 31 on 32-bit or 63 on 64-bit cores. */ + addi t1, t0, 7 /* 0x8000[]0007 == machine timer interrupt. 
*/ + bne a0, t1, application_interrupt_handler + + portUPDATE_MTIMER_COMPARE_REGISTER + call xTaskIncrementTick + beqz a0, processed_source /* Don't switch context if incrementing tick didn't unblock a task. */ + call vTaskSwitchContext + j processed_source + +#endif /* portasmHAS_MTIME */ +application_interrupt_handler: + call freertos_risc_v_application_interrupt_handler + j processed_source + +handle_exception: + /* a0 contains mcause. */ + li t0, 11 /* 11 == environment call. */ + bne a0, t0, application_exception_handler /* Not an M environment call, so some other exception. */ + call vTaskSwitchContext + j processed_source + +application_exception_handler: + call freertos_risc_v_application_exception_handler + j processed_source /* No other exceptions handled yet. */ + +processed_source: + portcontextRESTORE_CONTEXT /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RISC-V/portContext.h b/portable/IAR/RISC-V/portContext.h new file mode 100644 index 00000000000..8b883c87bce --- /dev/null +++ b/portable/IAR/RISC-V/portContext.h @@ -0,0 +1,177 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTCONTEXT_H +#define PORTCONTEXT_H + +#if __riscv_xlen == 64 + #define portWORD_SIZE 8 + #define store_x sd + #define load_x ld +#elif __riscv_xlen == 32 + #define store_x sw + #define load_x lw + #define portWORD_SIZE 4 +#else + #error Assembler did not define __riscv_xlen +#endif + +#include "freertos_risc_v_chip_specific_extensions.h" + +/* Only the standard core registers are stored by default. Any additional + * registers must be saved by the portasmSAVE_ADDITIONAL_REGISTERS and + * portasmRESTORE_ADDITIONAL_REGISTERS macros - which can be defined in a chip + * specific version of freertos_risc_v_chip_specific_extensions.h. See the + * notes at the top of portASM.S file. 
*/ +#define portCONTEXT_SIZE ( 31 * portWORD_SIZE ) + + EXTERN pxCurrentTCB + EXTERN xISRStackTop + EXTERN xCriticalNesting + EXTERN pxCriticalNesting + +/*-----------------------------------------------------------*/ + +portcontextSAVE_CONTEXT_INTERNAL MACRO + addi sp, sp, -portCONTEXT_SIZE + store_x x1, 1 * portWORD_SIZE( sp ) + store_x x5, 2 * portWORD_SIZE( sp ) + store_x x6, 3 * portWORD_SIZE( sp ) + store_x x7, 4 * portWORD_SIZE( sp ) + store_x x8, 5 * portWORD_SIZE( sp ) + store_x x9, 6 * portWORD_SIZE( sp ) + store_x x10, 7 * portWORD_SIZE( sp ) + store_x x11, 8 * portWORD_SIZE( sp ) + store_x x12, 9 * portWORD_SIZE( sp ) + store_x x13, 10 * portWORD_SIZE( sp ) + store_x x14, 11 * portWORD_SIZE( sp ) + store_x x15, 12 * portWORD_SIZE( sp ) + store_x x16, 13 * portWORD_SIZE( sp ) + store_x x17, 14 * portWORD_SIZE( sp ) + store_x x18, 15 * portWORD_SIZE( sp ) + store_x x19, 16 * portWORD_SIZE( sp ) + store_x x20, 17 * portWORD_SIZE( sp ) + store_x x21, 18 * portWORD_SIZE( sp ) + store_x x22, 19 * portWORD_SIZE( sp ) + store_x x23, 20 * portWORD_SIZE( sp ) + store_x x24, 21 * portWORD_SIZE( sp ) + store_x x25, 22 * portWORD_SIZE( sp ) + store_x x26, 23 * portWORD_SIZE( sp ) + store_x x27, 24 * portWORD_SIZE( sp ) + store_x x28, 25 * portWORD_SIZE( sp ) + store_x x29, 26 * portWORD_SIZE( sp ) + store_x x30, 27 * portWORD_SIZE( sp ) + store_x x31, 28 * portWORD_SIZE( sp ) + + load_x t0, xCriticalNesting /* Load the value of xCriticalNesting into t0. */ + store_x t0, 29 * portWORD_SIZE( sp ) /* Store the critical nesting value to the stack. */ + + csrr t0, mstatus /* Required for MPIE bit. */ + store_x t0, 30 * portWORD_SIZE( sp ) + + portasmSAVE_ADDITIONAL_REGISTERS /* Defined in freertos_risc_v_chip_specific_extensions.h to save any registers unique to the RISC-V implementation. */ + + load_x t0, pxCurrentTCB /* Load pxCurrentTCB. */ + store_x sp, 0( t0 ) /* Write sp to first TCB member. */ + + ENDM +/*-----------------------------------------------------------*/ + +portcontextSAVE_EXCEPTION_CONTEXT MACRO + portcontextSAVE_CONTEXT_INTERNAL + csrr a0, mcause + csrr a1, mepc + addi a1, a1, 4 /* Synchronous so update exception return address to the instruction after the instruction that generated the exception. */ + store_x a1, 0( sp ) /* Save updated exception return address. */ + load_x sp, xISRStackTop /* Switch to ISR stack. */ + ENDM +/*-----------------------------------------------------------*/ + +portcontextSAVE_INTERRUPT_CONTEXT MACRO + portcontextSAVE_CONTEXT_INTERNAL + csrr a0, mcause + csrr a1, mepc + store_x a1, 0( sp ) /* Asynchronous interrupt so save unmodified exception return address. */ + load_x sp, xISRStackTop /* Switch to ISR stack. */ + ENDM +/*-----------------------------------------------------------*/ + +portcontextRESTORE_CONTEXT MACRO + load_x t1, pxCurrentTCB /* Load pxCurrentTCB. */ + load_x sp, 0( t1 ) /* Read sp from first TCB member. */ + + /* Load mepc with the address of the instruction in the task to run next. */ + load_x t0, 0( sp ) + csrw mepc, t0 + + /* Defined in freertos_risc_v_chip_specific_extensions.h to restore any registers unique to the RISC-V implementation. */ + portasmRESTORE_ADDITIONAL_REGISTERS + + /* Load mstatus with the interrupt enable bits used by the task. */ + load_x t0, 30 * portWORD_SIZE( sp ) + csrw mstatus, t0 /* Required for MPIE bit. */ + + load_x t0, 29 * portWORD_SIZE( sp ) /* Obtain xCriticalNesting value for this task from task's stack. */ + load_x t1, pxCriticalNesting /* Load the address of xCriticalNesting into t1. 
*/ + store_x t0, 0( t1 ) /* Restore the critical nesting value for this task. */ + + load_x x1, 1 * portWORD_SIZE( sp ) + load_x x5, 2 * portWORD_SIZE( sp ) + load_x x6, 3 * portWORD_SIZE( sp ) + load_x x7, 4 * portWORD_SIZE( sp ) + load_x x8, 5 * portWORD_SIZE( sp ) + load_x x9, 6 * portWORD_SIZE( sp ) + load_x x10, 7 * portWORD_SIZE( sp ) + load_x x11, 8 * portWORD_SIZE( sp ) + load_x x12, 9 * portWORD_SIZE( sp ) + load_x x13, 10 * portWORD_SIZE( sp ) + load_x x14, 11 * portWORD_SIZE( sp ) + load_x x15, 12 * portWORD_SIZE( sp ) + load_x x16, 13 * portWORD_SIZE( sp ) + load_x x17, 14 * portWORD_SIZE( sp ) + load_x x18, 15 * portWORD_SIZE( sp ) + load_x x19, 16 * portWORD_SIZE( sp ) + load_x x20, 17 * portWORD_SIZE( sp ) + load_x x21, 18 * portWORD_SIZE( sp ) + load_x x22, 19 * portWORD_SIZE( sp ) + load_x x23, 20 * portWORD_SIZE( sp ) + load_x x24, 21 * portWORD_SIZE( sp ) + load_x x25, 22 * portWORD_SIZE( sp ) + load_x x26, 23 * portWORD_SIZE( sp ) + load_x x27, 24 * portWORD_SIZE( sp ) + load_x x28, 25 * portWORD_SIZE( sp ) + load_x x29, 26 * portWORD_SIZE( sp ) + load_x x30, 27 * portWORD_SIZE( sp ) + load_x x31, 28 * portWORD_SIZE( sp ) + addi sp, sp, portCONTEXT_SIZE + + mret + ENDM +/*-----------------------------------------------------------*/ + +#endif /* PORTCONTEXT_H */ diff --git a/portable/IAR/RISC-V/portmacro.h b/portable/IAR/RISC-V/portmacro.h index aac30900709..50e03e2f8f1 100644 --- a/portable/IAR/RISC-V/portmacro.h +++ b/portable/IAR/RISC-V/portmacro.h @@ -48,50 +48,43 @@ extern "C" { /* Type definitions. */ #if __riscv_xlen == 64 - #define portSTACK_TYPE uint64_t - #define portBASE_TYPE int64_t - #define portUBASE_TYPE uint64_t - #define portMAX_DELAY ( TickType_t ) 0xffffffffffffffffUL - #define portPOINTER_SIZE_TYPE uint64_t + #define portSTACK_TYPE uint64_t + #define portBASE_TYPE int64_t + #define portUBASE_TYPE uint64_t + #define portMAX_DELAY ( TickType_t ) 0xffffffffffffffffUL + #define portPOINTER_SIZE_TYPE uint64_t #elif __riscv_xlen == 32 - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE int32_t - #define portUBASE_TYPE uint32_t - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE int32_t + #define portUBASE_TYPE uint32_t + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - #error Assembler did not define __riscv_xlen + #error Assembler did not define __riscv_xlen #endif - typedef portSTACK_TYPE StackType_t; typedef portBASE_TYPE BaseType_t; typedef portUBASE_TYPE UBaseType_t; typedef portUBASE_TYPE TickType_t; /* Legacy type definitions. */ -#define portCHAR char -#define portFLOAT float -#define portDOUBLE double -#define portLONG long -#define portSHORT short +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do -not need to be guarded with a critical section. */ + * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 /*-----------------------------------------------------------*/ /* Architecture specifics. */ -#define portSTACK_GROWTH ( -1 ) -#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) -#ifdef __riscv64 - #error This is the RV32 port that has not yet been adapted for 64. 
- #define portBYTE_ALIGNMENT 16 -#else - #define portBYTE_ALIGNMENT 16 -#endif +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 16 /*-----------------------------------------------------------*/ - /* Scheduler utilities. */ extern void vTaskSwitchContext( void ); #define portYIELD() __asm volatile( "ecall" ); @@ -99,18 +92,30 @@ extern void vTaskSwitchContext( void ); #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ - /* Critical section management. */ -#define portCRITICAL_NESTING_IN_TCB 1 -extern void vTaskEnterCritical( void ); -extern void vTaskExitCritical( void ); +#define portCRITICAL_NESTING_IN_TCB 0 -#define portSET_INTERRUPT_MASK_FROM_ISR() 0 +#define portSET_INTERRUPT_MASK_FROM_ISR() 0 #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedStatusValue ) ( void ) uxSavedStatusValue + #define portDISABLE_INTERRUPTS() __disable_interrupt() #define portENABLE_INTERRUPTS() __enable_interrupt() -#define portENTER_CRITICAL() vTaskEnterCritical() -#define portEXIT_CRITICAL() vTaskExitCritical() + +extern size_t xCriticalNesting; +#define portENTER_CRITICAL() \ +{ \ + portDISABLE_INTERRUPTS(); \ + xCriticalNesting++; \ +} + +#define portEXIT_CRITICAL() \ +{ \ + xCriticalNesting--; \ + if( xCriticalNesting == 0 ) \ + { \ + portENABLE_INTERRUPTS(); \ + } \ +} /*-----------------------------------------------------------*/ @@ -124,55 +129,51 @@ extern void vTaskExitCritical( void ); /*-----------------------------------------------------------*/ -/* Task function macros as described on the FreeRTOS.org WEB site. These are -not necessary for to use this port. They are defined so the common demo files -(which build with all the ports) will build. */ +/* Task function macros as described on the FreeRTOS.org WEB site. These are + * not necessary for to use this port. They are defined so the common demo + * files (which build with all the ports) will build. */ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) /*-----------------------------------------------------------*/ -#define portNOP() __asm volatile ( " nop " ) - -#define portINLINE __inline +#define portNOP() __asm volatile( " nop " ) +#define portINLINE __inline #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__(( always_inline)) + #define portFORCE_INLINE inline __attribute__(( always_inline)) #endif #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) - +/*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in -the source code because to do so would cause other compilers to generate -warnings. */ + * the source code because to do so would cause other compilers to generate + * warnings. */ #pragma diag_suppress=Pa082 /* configCLINT_BASE_ADDRESS is a legacy definition that was replaced by the -configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS definitions. For -backward compatibility derive the newer definitions from the old if the old -definition is found. */ + * configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS definitions. For + * backward compatibility derive the newer definitions from the old if the old + * definition is found. 
*/ #if defined( configCLINT_BASE_ADDRESS ) && !defined( configMTIME_BASE_ADDRESS ) && ( configCLINT_BASE_ADDRESS == 0 ) - /* Legacy case where configCLINT_BASE_ADDRESS was defined as 0 to indicate - there was no CLINT. Equivalent now is to set the MTIME and MTIMECMP - addresses to 0. */ - #define configMTIME_BASE_ADDRESS ( 0 ) - #define configMTIMECMP_BASE_ADDRESS ( 0 ) + /* Legacy case where configCLINT_BASE_ADDRESS was defined as 0 to indicate + * there was no CLINT. Equivalent now is to set the MTIME and MTIMECMP + * addresses to 0. */ + #define configMTIME_BASE_ADDRESS ( 0 ) + #define configMTIMECMP_BASE_ADDRESS ( 0 ) #elif defined( configCLINT_BASE_ADDRESS ) && !defined( configMTIME_BASE_ADDRESS ) - /* Legacy case where configCLINT_BASE_ADDRESS was set to the base address of - the CLINT. Equivalent now is to derive the MTIME and MTIMECMP addresses - from the CLINT address. */ - #define configMTIME_BASE_ADDRESS ( ( configCLINT_BASE_ADDRESS ) + 0xBFF8UL ) - #define configMTIMECMP_BASE_ADDRESS ( ( configCLINT_BASE_ADDRESS ) + 0x4000UL ) + /* Legacy case where configCLINT_BASE_ADDRESS was set to the base address of + * the CLINT. Equivalent now is to derive the MTIME and MTIMECMP addresses + * from the CLINT address. */ + #define configMTIME_BASE_ADDRESS ( ( configCLINT_BASE_ADDRESS ) + 0xBFF8UL ) + #define configMTIMECMP_BASE_ADDRESS ( ( configCLINT_BASE_ADDRESS ) + 0x4000UL ) #elif !defined( configMTIME_BASE_ADDRESS ) || !defined( configMTIMECMP_BASE_ADDRESS ) - #error configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS must be defined in FreeRTOSConfig.h. Set them to zero if there is no MTIME (machine time) clock. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html + #error configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS must be defined in FreeRTOSConfig.h. Set them to zero if there is no MTIME (machine time) clock. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html #endif - - #ifdef __cplusplus } #endif #endif /* PORTMACRO_H */ - From c5a1fee9efac71aff6b69e6594aa8deb475fd50c Mon Sep 17 00:00:00 2001 From: Cristian Cristea Date: Tue, 27 Sep 2022 00:43:30 +0300 Subject: [PATCH 098/164] Added better pointer declaration readability (#567) * Add better pointer declaration readability I revised the declaration of single-line pointers by splitting it into multiple lines. Now, every pointer is declared (and initialized accordingly) on its own line. This refactoring should enhance readability and decrease the probability of error when a new pointer is added/removed or a current one has its initialization value modified. Signed-off-by: Cristian Cristea * Remove unnecessary whitespace characters and lines It removes whitespace characters at the end of lines (empty or othwerwise) and clear lines at the end of the file (only one remains). It is an automatic operation done by git. 
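For illustration only (this mirrors the pvPortMalloc() hunks below and adds no further code change), the pattern is:

    /* Before: several pointers share one declaration. */
    BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink;

    /* After: one declaration per line, so adding, removing or
     * initialising a pointer touches only a single line. */
    BlockLink_t * pxBlock;
    BlockLink_t * pxPreviousBlock;
    BlockLink_t * pxNewBlockLink;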
Signed-off-by: Cristian Cristea Signed-off-by: Cristian Cristea --- event_groups.c | 3 ++- portable/ARMv8M/secure/heap/secure_heap.c | 7 +++++-- portable/GCC/ARM_CM23/secure/secure_heap.c | 7 +++++-- portable/GCC/ARM_CM33/secure/secure_heap.c | 7 +++++-- portable/GCC/ARM_CM55/secure/secure_heap.c | 7 +++++-- portable/GCC/ARM_CM85/secure/secure_heap.c | 7 +++++-- portable/GCC/MicroBlaze/port.c | 8 ++------ portable/GCC/MicroBlazeV8/port.c | 9 ++++----- portable/GCC/MicroBlazeV9/port.c | 5 ++--- portable/IAR/ARM_CM23/secure/secure_heap.c | 7 +++++-- portable/IAR/ARM_CM33/secure/secure_heap.c | 7 +++++-- portable/IAR/ARM_CM55/secure/secure_heap.c | 7 +++++-- portable/IAR/ARM_CM85/secure/secure_heap.c | 7 +++++-- portable/MemMang/heap_2.c | 4 +++- portable/MemMang/heap_4.c | 7 +++++-- portable/MemMang/heap_5.c | 10 +++++++--- portable/ThirdParty/GCC/Xtensa_ESP32/port.c | 3 ++- portable/ThirdParty/XCC/Xtensa/port.c | 3 ++- portable/oWatcom/16BitDOS/common/portcomn.c | 21 ++++++++++----------- tasks.c | 11 ++++++++--- 20 files changed, 92 insertions(+), 55 deletions(-) diff --git a/event_groups.c b/event_groups.c index 0ecfefa77ab..e68561f3ae3 100644 --- a/event_groups.c +++ b/event_groups.c @@ -533,7 +533,8 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) { - ListItem_t * pxListItem, * pxNext; + ListItem_t * pxListItem; + ListItem_t * pxNext; ListItem_t const * pxListEnd; List_t const * pxList; EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits; diff --git a/portable/ARMv8M/secure/heap/secure_heap.c b/portable/ARMv8M/secure/heap/secure_heap.c index b3a7378188b..741b46371cc 100644 --- a/portable/ARMv8M/secure/heap/secure_heap.c +++ b/portable/ARMv8M/secure/heap/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. */ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/GCC/ARM_CM23/secure/secure_heap.c b/portable/GCC/ARM_CM23/secure/secure_heap.c index b3a7378188b..741b46371cc 100644 --- a/portable/GCC/ARM_CM23/secure/secure_heap.c +++ b/portable/GCC/ARM_CM23/secure/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. 
*/ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/GCC/ARM_CM33/secure/secure_heap.c b/portable/GCC/ARM_CM33/secure/secure_heap.c index b3a7378188b..741b46371cc 100644 --- a/portable/GCC/ARM_CM33/secure/secure_heap.c +++ b/portable/GCC/ARM_CM33/secure/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. */ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/GCC/ARM_CM55/secure/secure_heap.c b/portable/GCC/ARM_CM55/secure/secure_heap.c index c633e2d0596..157fdbf0eec 100644 --- a/portable/GCC/ARM_CM55/secure/secure_heap.c +++ b/portable/GCC/ARM_CM55/secure/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. */ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/GCC/ARM_CM85/secure/secure_heap.c b/portable/GCC/ARM_CM85/secure/secure_heap.c index b3a7378188b..741b46371cc 100644 --- a/portable/GCC/ARM_CM85/secure/secure_heap.c +++ b/portable/GCC/ARM_CM85/secure/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. 
*/ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/GCC/MicroBlaze/port.c b/portable/GCC/MicroBlaze/port.c index 48d661b8dcf..b1df8fbe13e 100644 --- a/portable/GCC/MicroBlaze/port.c +++ b/portable/GCC/MicroBlaze/port.c @@ -88,7 +88,8 @@ static void prvSetupTimerInterrupt( void ); */ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) { -extern void *_SDA2_BASE_, *_SDA_BASE_; +extern void * _SDA2_BASE_; +extern void * _SDA_BASE_; const uint32_t ulR2 = ( uint32_t ) &_SDA2_BASE_; const uint32_t ulR13 = ( uint32_t ) &_SDA_BASE_; @@ -327,8 +328,3 @@ uint32_t ulCSR; XTmrCtr_mSetControlStatusReg( XPAR_OPB_TIMER_1_BASEADDR, portCOUNTER_0, ulCSR ); } /*-----------------------------------------------------------*/ - - - - - diff --git a/portable/GCC/MicroBlazeV8/port.c b/portable/GCC/MicroBlazeV8/port.c index 381702fb013..720c1444742 100644 --- a/portable/GCC/MicroBlazeV8/port.c +++ b/portable/GCC/MicroBlazeV8/port.c @@ -48,7 +48,7 @@ the scheduler being commenced interrupts should not be enabled, so the critical nesting variable is initialised to a non-zero value. */ #define portINITIAL_NESTING_VALUE ( 0xff ) -/* The bit within the MSR register that enabled/disables interrupts and +/* The bit within the MSR register that enabled/disables interrupts and exceptions respectively. */ #define portMSR_IE ( 0x02U ) #define portMSR_EE ( 0x100U ) @@ -106,7 +106,8 @@ static XIntc xInterruptControllerInstance; */ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) { -extern void *_SDA2_BASE_, *_SDA_BASE_; +extern void * _SDA2_BASE_; +extern void * _SDA_BASE_; const uint32_t ulR2 = ( uint32_t ) &_SDA2_BASE_; const uint32_t ulR13 = ( uint32_t ) &_SDA_BASE_; @@ -130,7 +131,7 @@ const uint32_t ulR13 = ( uint32_t ) &_SDA_BASE_; disabled. Each task will enable interrupts automatically when it enters the running state for the first time. */ *pxTopOfStack = mfmsr() & ~portMSR_IE; - + #if( MICROBLAZE_EXCEPTIONS_ENABLED == 1 ) { /* Ensure exceptions are enabled for the task. 
*/ @@ -449,5 +450,3 @@ int32_t lStatus; return lStatus; } /*-----------------------------------------------------------*/ - - diff --git a/portable/GCC/MicroBlazeV9/port.c b/portable/GCC/MicroBlazeV9/port.c index 7c605e00337..4f54f9986ca 100644 --- a/portable/GCC/MicroBlazeV9/port.c +++ b/portable/GCC/MicroBlazeV9/port.c @@ -111,7 +111,8 @@ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEn StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) #endif { -extern void *_SDA2_BASE_, *_SDA_BASE_; +extern void * _SDA2_BASE_; +extern void * _SDA_BASE_; const uint32_t ulR2 = ( uint32_t ) &_SDA2_BASE_; const uint32_t ulR13 = ( uint32_t ) &_SDA_BASE_; extern void _start1( void ); @@ -487,5 +488,3 @@ int32_t lStatus; return lStatus; } /*-----------------------------------------------------------*/ - - diff --git a/portable/IAR/ARM_CM23/secure/secure_heap.c b/portable/IAR/ARM_CM23/secure/secure_heap.c index b3a7378188b..741b46371cc 100644 --- a/portable/IAR/ARM_CM23/secure/secure_heap.c +++ b/portable/IAR/ARM_CM23/secure/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. */ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/IAR/ARM_CM33/secure/secure_heap.c b/portable/IAR/ARM_CM33/secure/secure_heap.c index b3a7378188b..741b46371cc 100644 --- a/portable/IAR/ARM_CM33/secure/secure_heap.c +++ b/portable/IAR/ARM_CM33/secure/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. */ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/IAR/ARM_CM55/secure/secure_heap.c b/portable/IAR/ARM_CM55/secure/secure_heap.c index c633e2d0596..157fdbf0eec 100644 --- a/portable/IAR/ARM_CM55/secure/secure_heap.c +++ b/portable/IAR/ARM_CM55/secure/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. 
*/ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/IAR/ARM_CM85/secure/secure_heap.c b/portable/IAR/ARM_CM85/secure/secure_heap.c index b3a7378188b..741b46371cc 100644 --- a/portable/IAR/ARM_CM85/secure/secure_heap.c +++ b/portable/IAR/ARM_CM85/secure/secure_heap.c @@ -113,7 +113,8 @@ static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( s /** * @brief Create a couple of list links to mark the start and end of the list. */ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /** * @brief Keeps track of the number of free bytes remaining, but says nothing @@ -245,7 +246,9 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; /* If this is the first call to malloc then the heap will require diff --git a/portable/MemMang/heap_2.c b/portable/MemMang/heap_2.c index a22163eacf7..124fe86d8cf 100644 --- a/portable/MemMang/heap_2.c +++ b/portable/MemMang/heap_2.c @@ -152,7 +152,9 @@ static void prvHeapInit( void ) PRIVILEGED_FUNCTION; void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; PRIVILEGED_DATA static BaseType_t xHeapHasBeenInitialised = pdFALSE; void * pvReturn = NULL; size_t xAdditionalRequiredSize; diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index f61162a64c9..2a594a690d2 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -123,7 +123,8 @@ static void prvHeapInit( void ) PRIVILEGED_FUNCTION; static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); /* Create a couple of list links to mark the start and end of the list. */ -PRIVILEGED_DATA static BlockLink_t xStart, * pxEnd = NULL; +PRIVILEGED_DATA static BlockLink_t xStart; +PRIVILEGED_DATA static BlockLink_t * pxEnd = NULL; /* Keeps track of the number of calls to allocate and free memory as well as the * number of free bytes remaining, but says nothing about fragmentation. 
*/ @@ -136,7 +137,9 @@ PRIVILEGED_DATA static size_t xNumberOfSuccessfulFrees = 0; void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; size_t xAdditionalRequiredSize; diff --git a/portable/MemMang/heap_5.c b/portable/MemMang/heap_5.c index c5d29d90850..38953c5f2cd 100644 --- a/portable/MemMang/heap_5.c +++ b/portable/MemMang/heap_5.c @@ -141,7 +141,8 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ); static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); /* Create a couple of list links to mark the start and end of the list. */ -static BlockLink_t xStart, * pxEnd = NULL; +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; /* Keeps track of the number of calls to allocate and free memory as well as the * number of free bytes remaining, but says nothing about fragmentation. */ @@ -154,7 +155,9 @@ static size_t xNumberOfSuccessfulFrees = 0; void * pvPortMalloc( size_t xWantedSize ) { - BlockLink_t * pxBlock, * pxPreviousBlock, * pxNewBlockLink; + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; void * pvReturn = NULL; size_t xAdditionalRequiredSize; @@ -441,7 +444,8 @@ static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) { - BlockLink_t * pxFirstFreeBlockInRegion = NULL, * pxPreviousFreeBlock; + BlockLink_t * pxFirstFreeBlockInRegion = NULL; + BlockLink_t * pxPreviousFreeBlock; portPOINTER_SIZE_TYPE xAlignedHeap; size_t xTotalRegionSize, xTotalHeapSize = 0; BaseType_t xDefinedRegions = 0; diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/port.c b/portable/ThirdParty/GCC/Xtensa_ESP32/port.c index 2e3831cbfa5..7675591500e 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/port.c +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/port.c @@ -140,7 +140,8 @@ void _xt_user_exit( void ); #endif /* *INDENT-ON* */ { - StackType_t * sp, * tp; + StackType_t * sp; + StackType_t * tp; XtExcFrame * frame; #if XCHAL_CP_NUM > 0 diff --git a/portable/ThirdParty/XCC/Xtensa/port.c b/portable/ThirdParty/XCC/Xtensa/port.c index 5a6addecb23..6320ca2c969 100644 --- a/portable/ThirdParty/XCC/Xtensa/port.c +++ b/portable/ThirdParty/XCC/Xtensa/port.c @@ -67,7 +67,8 @@ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t px StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) #endif { - StackType_t * sp, * tp; + StackType_t * sp; + StackType_t * tp; XtExcFrame * frame; #if XCHAL_CP_NUM > 0 diff --git a/portable/oWatcom/16BitDOS/common/portcomn.c b/portable/oWatcom/16BitDOS/common/portcomn.c index 38e6c0e0f4d..62fad759c9c 100644 --- a/portable/oWatcom/16BitDOS/common/portcomn.c +++ b/portable/oWatcom/16BitDOS/common/portcomn.c @@ -28,8 +28,8 @@ /* Changes from V1.00: - - + pxPortInitialiseStack() now initialises the stack of new tasks to the + + + pxPortInitialiseStack() now initialises the stack of new tasks to the same format used by the compiler. This allows the compiler generated interrupt mechanism to be used for context switches. @@ -43,7 +43,7 @@ Changes from V2.6.1: + usPortCheckFreeStackSpace() has been moved to tasks.c. 
*/ - + #include #include "FreeRTOS.h" @@ -53,9 +53,10 @@ Changes from V2.6.1: /* See header file for description. */ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) { -StackType_t DS_Reg = 0, *pxOriginalSP; +StackType_t DS_Reg = 0; +StackType_t * pxOriginalSP; - /* Place a few bytes of known values on the bottom of the stack. + /* Place a few bytes of known values on the bottom of the stack. This is just useful for debugging. */ *pxTopOfStack = 0x1111; @@ -74,9 +75,9 @@ StackType_t DS_Reg = 0, *pxOriginalSP; /* We are going to start the scheduler using a return from interrupt instruction to load the program counter, so first there would be the - status register and interrupt return address. We make this the start + status register and interrupt return address. We make this the start of the task. */ - *pxTopOfStack = portINITIAL_SW; + *pxTopOfStack = portINITIAL_SW; pxTopOfStack--; *pxTopOfStack = FP_SEG( pxCode ); pxTopOfStack--; @@ -86,11 +87,11 @@ StackType_t DS_Reg = 0, *pxOriginalSP; /* We are going to setup the stack for the new task to look like the stack frame was setup by a compiler generated ISR. We need to know the address of the existing stack top to place in the SP register within - the stack frame. pxOriginalSP holds SP before (simulated) pusha was + the stack frame. pxOriginalSP holds SP before (simulated) pusha was called. */ pxOriginalSP = pxTopOfStack; - /* The remaining registers would be pushed on the stack by our context + /* The remaining registers would be pushed on the stack by our context switch function. These are loaded with values simply to make debugging easier. */ *pxTopOfStack = FP_OFF( pvParameters ); /* AX */ @@ -138,5 +139,3 @@ StackType_t DS_Reg = 0, *pxOriginalSP; return pxTopOfStack; } /*-----------------------------------------------------------*/ - - diff --git a/tasks.c b/tasks.c index 5277eb92d4c..b0d57265429 100644 --- a/tasks.c +++ b/tasks.c @@ -2054,7 +2054,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) eTaskState eTaskGetState( TaskHandle_t xTask ) { eTaskState eReturn; - List_t const * pxStateList, * pxDelayedList, * pxOverflowedDelayedList; + List_t const * pxStateList; + List_t const * pxDelayedList; + List_t const * pxOverflowedDelayedList; const TCB_t * const pxTCB = xTask; configASSERT( pxTCB ); @@ -3422,7 +3424,9 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, const char pcNameToQuery[] ) { - TCB_t * pxNextTCB, * pxFirstTCB, * pxReturn = NULL; + TCB_t * pxNextTCB; + TCB_t * pxFirstTCB; + TCB_t * pxReturn = NULL; UBaseType_t x; char cNextChar; BaseType_t xBreakLoop; @@ -5152,7 +5156,8 @@ static void prvCheckTasksWaitingTermination( void ) List_t * pxList, eTaskState eState ) { - configLIST_VOLATILE TCB_t * pxNextTCB, * pxFirstTCB; + configLIST_VOLATILE TCB_t * pxNextTCB; + configLIST_VOLATILE TCB_t * pxFirstTCB; UBaseType_t uxTask = 0; if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) From b939e2fd2e33bb4db49e40e631316060967a7d80 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Wed, 28 Sep 2022 21:42:05 +0530 Subject: [PATCH 099/164] Update doc comments in task.h (#570) Signed-off-by: Gaurav Aggarwal --- include/task.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/task.h b/include/task.h index e681b018d0e..18b7bce2c9d 100644 --- a/include/task.h +++ b/include/task.h @@ -664,7 
+664,7 @@ typedef enum * // Create a task from the const structure defined above. The task handle * // is requested (the second parameter is not NULL) but in this case just for * // demonstration purposes as its not actually used. - * xTaskCreateRestricted( &xRegTest1Parameters, &xHandle ); + * xTaskCreateRestrictedStatic( &xRegTest1Parameters, &xHandle ); * * // Start the scheduler. * vTaskStartScheduler(); @@ -732,7 +732,7 @@ typedef enum * // defined or shared regions have been declared elsewhere). * } * @endcode - * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \defgroup vTaskAllocateMPURegions vTaskAllocateMPURegions * \ingroup Tasks */ void vTaskAllocateMPURegions( TaskHandle_t xTask, From 553902874b3d0192e84f72e4bf6e233ae4adb70a Mon Sep 17 00:00:00 2001 From: Jeff Tenney Date: Mon, 3 Oct 2022 12:39:17 -0700 Subject: [PATCH 100/164] Tickless idle fixes/improvement (#59) * Fix tickless idle when stopping systick on zero... ...and don't stop SysTick at all in the eAbortSleep case. Prior to this commit, if vPortSuppressTicksAndSleep() happens to stop the SysTick on zero, then after tickless idle ends, xTickCount advances one full tick more than the time that actually elapsed as measured by the SysTick. See "bug 1" in this forum post: https://forums.freertos.org/t/ultasknotifytake-timeout-accuracy/9629/40 SysTick ------- The SysTick is the hardware timer that provides the OS tick interrupt in the official ports for Cortex M. SysTick starts counting down from the value stored in its reload register. When SysTick reaches zero, it requests an interrupt. On the next SysTick clock cycle, it loads the counter again from the reload register. To get periodic interrupts every N SysTick clock cycles, the reload register must be N - 1. Bug Example ----------- - Idle task calls vPortSuppressTicksAndSleep(xExpectedIdleTime = 2). [Doesn't have to be "2" -- could be any number.] - vPortSuppressTicksAndSleep() stops SysTick, and the current-count register happens to stop on zero. - SysTick ISR executes, setting xPendedTicks = 1 - vPortSuppressTicksAndSleep() masks interrupts and calls eTaskConfirmSleepModeStatus() which confirms the sleep operation. *** - vPortSuppressTicksAndSleep() configures SysTick for 1 full tick (xExpectedIdleTime - 1) plus the current-count register (which is 0) - One tick period elapses in sleep. - SysTick wakes CPU, ISR executes and increments xPendedTicks to 2. - vPortSuppressTicksAndSleep() calls vTaskStepTick(1), then returns. - Idle task resumes scheduler, which increments xTickCount twice (for xPendedTicks = 2) In the end, two ticks elapsed as measured by SysTick, but the code increments xTickCount three times. The root cause is that the code assumes the SysTick current-count register always contains the number of SysTick counts remaining in the current tick period. However, when the current-count register is zero, there are ulTimerCountsForOneTick counts remaining, not zero. This error is not the kind of time slippage normally associated with tickless idle. *** Note that a recent commit https://github.com/FreeRTOS/FreeRTOS-Kernel/commit/e1b98f0 results in eAbortSleep in this case, due to xPendedTicks != 0. That commit does mostly resolve this bug without specifically mentioning it, and without this commit. But that resolution allows the code in port.c not to directly address the special case of stopping SysTick on zero in any code or comments. 
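The corrected accounting can be seen condensed from the port.c hunks later in this patch (the variable and macro names below are the ones used there); the key point is that a current-value register of zero still represents a full tick period:

    /* The SysTick requests its interrupt when decrementing from 1 to 0, so
     * a current-value register of zero means ulTimerCountsForOneTick
     * decrements remain in the current period, not zero. */
    ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;

    if( ulSysTickDecrementsLeft == 0 )
    {
        ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
    }

    /* Wait xExpectedIdleTime tick periods.  -1 because this code normally
     * runs part way through the first period; if the tick interrupt is
     * already pending, it is cleared and one further period is subtracted. */
    ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );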
That commit also generates additional instances of eAbortSleep, and a second purpose of this commit is to optimize how vPortSuppressTicksAndSleep() behaves for eAbortSleep, as noted below. This commit also includes an optimization to avoid stopping the SysTick when eTaskConfirmSleepModeStatus() returns eAbortSleep. This optimization belongs with this fix because the method of handling the SysTick being stopped on zero changes with this optimization. * Fix imminent tick rescheduled after tickless idle Prior to this commit, if something other than systick wakes the CPU from tickless idle, vPortSuppressTicksAndSleep() might cause xTickCount to increment once too many times. See "bug 2" in this forum post: https://forums.freertos.org/t/ultasknotifytake-timeout-accuracy/9629/40 SysTick ------- The SysTick is the hardware timer that provides the OS tick interrupt in the official ports for Cortex M. SysTick starts counting down from the value stored in its reload register. When SysTick reaches zero, it requests an interrupt. On the next SysTick clock cycle, it loads the counter again from the reload register. To get periodic interrupts every N SysTick clock cycles, the reload register must be N - 1. Bug Example ----------- - CPU is sleeping in vPortSuppressTicksAndSleep() - Something other than the SysTick wakes the CPU. - vPortSuppressTicksAndSleep() calculates the number of SysTick counts until the next tick. The bug occurs only if this number is small. - vPortSuppressTicksAndSleep() puts this small number into the SysTick reload register, and starts SysTick. - vPortSuppressTicksAndSleep() calls vTaskStepTick() - While vTaskStepTick() executes, the SysTick expires. The ISR pends because interrupts are masked, and SysTick starts a 2nd period still based on the small number of counts in its reload register. This 2nd period is undesirable and is likely to cause the error noted below. - vPortSuppressTicksAndSleep() puts the normal tick duration into the SysTick's reload register. - vPortSuppressTicksAndSleep() unmasks interrupts before the SysTick starts a new period based on the new value in the reload register. [This is a race condition that can go either way, but for the bug to occur, the race must play out this way.] - The pending SysTick ISR executes and increments xPendedTicks. - The SysTick expires again, finishing the second very small period, and starts a new period this time based on the full tick duration. - The SysTick ISR increments xPendedTicks (or xTickCount) even though only a tiny fraction of a tick period has elapsed since the previous tick. The bug occurs when *two* consecutive small periods of the SysTick are both counted as ticks. The root cause is a race caused by the small SysTick period. If vPortSuppressTicksAndSleep() unmasks interrupts *after* the small period expires but *before* the SysTick starts a period based on the full tick period, then two small periods are counted as ticks when only one should be counted. The end result is xTickCount advancing nearly one full tick more than time actually elapsed as measured by the SysTick. This is not the kind of time slippage normally associated with tickless idle. After this commit the code starts the SysTick and then immediately modifies the reload register to ensure the very short cycle (if any) is conducted only once. This strategy requires special consideration for the build option that configures SysTick to use a divided clock. 
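Condensed from the restart sequence in the port.c hunks below (core-clock case shown; the macro names are the port's own), the pattern is to restart the timer first and only then restore the full-tick reload value, so any very short period can run at most once:

    /* Restart SysTick from whatever short value is still in the reload
     * register (covering the remainder of the current tick period)... */
    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;

    /* ...then immediately put the standard tick value back into the reload
     * register, so the short period cannot be repeated. */
    portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;

The divided-clock build option needs the extra step described next.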
To avoid waiting around for the SysTick to load value from the reload register, the new code temporarily configures the SysTick to use the undivided clock. The resulting timing error is typical for tickless idle. The error (commonly known as drift or slippage in kernel time) caused by this strategy is equivalent to one or two counts in ulStoppedTimerCompensation. This commit also updates comments and #define symbols related to the SysTick clock option. The SysTick can optionally be clocked by a divided version of the CPU clock (commonly divide-by-8). The new code in this commit adjusts these comments and symbols to make them clearer and more useful in configurations that use the divided clock. The fix made in this commit requires the use of these symbols, as noted in the code comments. * Fix tickless idle with alternate systick clocking Prior to this commit, in configurations using the alternate SysTick clocking, vPortSuppressTicksAndSleep() might cause xTickCount to jump ahead as much as the entire expected idle time or fall behind as much as one full tick compared to time as measured by the SysTick. SysTick ------- The SysTick is the hardware timer that provides the OS tick interrupt in the official ports for Cortex M. SysTick starts counting down from the value stored in its reload register. When SysTick reaches zero, it requests an interrupt. On the next SysTick clock cycle, it loads the counter again from the reload register. The SysTick has a configuration option to be clocked by an alternate clock besides the core clock. This alternate clock is MCU dependent. Scenarios Fixed --------------- The new code in this commit handles the following scenarios that were not handled correctly prior to this commit. 1. Before the sleep, vPortSuppressTicksAndSleep() stops the SysTick on zero, long after SysTick reached zero. Prior to this commit, this scenario caused xTickCount to jump ahead one full tick for the same reason documented here: https://github.com/FreeRTOS/FreeRTOS-Kernel/pull/59/commits/0c7b04bd3a745c52151abebc882eed3f811c4c81 2. After the sleep, vPortSuppressTicksAndSleep() stops the SysTick before it loads the counter from the reload register. Prior to this commit, this scenario caused xTickCount to jump ahead by the entire expected idle time (xExpectedIdleTime) because the current-count register is zero before it loads from the reload register. 3. Prior to return, vPortSuppressTicksAndSleep() attempts to start a short SysTick period when the current SysTick clock cycle has a lot of time remaining. Prior to this commit, this scenario could cause xTickCount to fall behind by as much as nearly one full tick because the short SysTick cycle never started. Note that #3 is partially fixed by https://github.com/FreeRTOS/FreeRTOS-Kernel/pull/59/commits/967acc9b200d3d4beeb289d9da9e88798074b431 even though that commit addresses a different issue. So this commit completes the partial fix. * Improve comments and name of preprocessor symbol Add a note in the code comments that SysTick requests an interrupt when decrementing from 1 to 0, so that's why stopping SysTick on zero is a special case. Readers might unknowingly assume that SysTick requests an interrupt when wrapping from 0 back to the load-register value. Reconsider new "_SETTING" suffix since "_CONFIG" suffix seems more descriptive. 
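For context, the arrangement the ports end up with (as it appears in the hunks below) separates the hardware bit from the value actually used:

    /* The CLK bit of the SysTick control register is always defined... */
    #define portNVIC_SYSTICK_CLK_BIT            ( 1UL << 2UL )

    /* ...while the value written to the control register during normal
     * operation depends on whether the application overrides
     * configSYSTICK_CLOCK_HZ. */
    #ifndef configSYSTICK_CLOCK_HZ
        #define configSYSTICK_CLOCK_HZ          ( configCPU_CLOCK_HZ )
        /* Clock the SysTick at the same frequency as the core. */
        #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
    #else
        /* Clock the SysTick from the alternate (divided) clock. */
        #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
    #endif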
The code relies on *both* of these preprocessor symbols: portNVIC_SYSTICK_CLK_BIT portNVIC_SYSTICK_CLK_BIT_CONFIG **new** A meaningful suffix is really helpful to distinguish the two symbols. * Revert introduction of 2nd name for NVIC register When I added portNVIC_ICSR_REG I didn't realize there was already a portNVIC_INT_CTRL_REG, which identifies the same register. Not good to have both. Note that portNVIC_INT_CTRL_REG is defined in portmacro.h and is already used in this file (port.c). * Replicate to other Cortex M ports Also set a new fiddle factor based on tests with a CM4F. I used gcc, optimizing at -O1. Users can fine-tune as needed. Also add configSYSTICK_CLOCK_HZ to the CM0 ports to be just like the other Cortex M ports. This change allowed uniformity in the default tickless implementations across all Cortex M ports. And CM0 is likely to benefit from configSYSTICK_CLOCK_HZ, especially considering new CM0 devices with very fast CPU clock speeds. * Revert changes to IAR-CM0-portmacro.h portNVIC_INT_CTRL_REG was already defined in port.c. No need to define it in portmacro.h. * Handle edge cases with slow SysTick clock Co-authored-by: Cobus van Eeden <35851496+cobusve@users.noreply.github.com> Co-authored-by: abhidixi11 <44424462+abhidixi11@users.noreply.github.com> Co-authored-by: Joseph Julicher Co-authored-by: alfred gedeon <28123637+alfred2g@users.noreply.github.com> --- portable/ARMv8M/non_secure/port.c | 460 +++++++++++--------- portable/CCS/ARM_CM3/port.c | 296 ++++++++----- portable/CCS/ARM_CM4F/port.c | 296 ++++++++----- portable/GCC/ARM_CM0/port.c | 195 ++++++--- portable/GCC/ARM_CM23/non_secure/port.c | 460 +++++++++++--------- portable/GCC/ARM_CM23_NTZ/non_secure/port.c | 460 +++++++++++--------- portable/GCC/ARM_CM3/port.c | 294 ++++++++----- portable/GCC/ARM_CM33/non_secure/port.c | 460 +++++++++++--------- portable/GCC/ARM_CM33_NTZ/non_secure/port.c | 460 +++++++++++--------- portable/GCC/ARM_CM4F/port.c | 294 ++++++++----- portable/GCC/ARM_CM7/r0p1/port.c | 294 ++++++++----- portable/IAR/ARM_CM0/port.c | 195 ++++++--- portable/IAR/ARM_CM0/portmacro.h | 5 +- portable/IAR/ARM_CM23/non_secure/port.c | 460 +++++++++++--------- portable/IAR/ARM_CM23_NTZ/non_secure/port.c | 460 +++++++++++--------- portable/IAR/ARM_CM3/port.c | 294 ++++++++----- portable/IAR/ARM_CM33/non_secure/port.c | 460 +++++++++++--------- portable/IAR/ARM_CM33_NTZ/non_secure/port.c | 460 +++++++++++--------- portable/IAR/ARM_CM4F/port.c | 294 ++++++++----- portable/IAR/ARM_CM7/r0p1/port.c | 294 ++++++++----- portable/MikroC/ARM_CM4F/port.c | 301 +++++++------ portable/RVDS/ARM_CM0/port.c | 189 +++++--- portable/RVDS/ARM_CM3/port.c | 302 +++++++------ portable/RVDS/ARM_CM4F/port.c | 302 +++++++------ portable/RVDS/ARM_CM7/r0p1/port.c | 302 +++++++------ 25 files changed, 4816 insertions(+), 3471 deletions(-) diff --git a/portable/ARMv8M/non_secure/port.c b/portable/ARMv8M/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/ARMv8M/non_secure/port.c +++ b/portable/ARMv8M/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( 
portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. 
*/ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. 
*/ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. - * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. 
*/ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. 
*/ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. */ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. 
*/ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
*/ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c index 35c832b641e..257aefa55fd 100644 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -38,27 +38,18 @@ #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. 
*/ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -86,12 +77,24 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to @@ -213,66 +216,66 @@ static void prvTaskExitError( void ) BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". 
FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. 
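As an illustration of the probe above (assumed values only, not something this patch introduces): on a hypothetical device with three MSB-aligned priority bits, the 0xFF written to the first user priority register reads back as 0xE0, and the loop then derives the bit count as sketched below.

    /* Illustrative sketch only - mirrors the loop above with assumed values. */
    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        uint8_t ucMaxPriorityValue = 0xE0U;  /* Assumed read-back of the 0xFF probe write. */
        uint32_t ulMaxPRIGROUPValue = 7UL;   /* portMAX_PRIGROUP_BITS in these ports. */

        /* Count how many of the written bits stuck. */
        while( ( ucMaxPriorityValue & 0x80U ) == 0x80U )
        {
            ulMaxPRIGROUPValue--;
            ucMaxPriorityValue <<= 1;
        }

        /* 7 - 4 == 3 implemented priority bits, which is the value the
         * configASSERT()s compare against __NVIC_PRIO_BITS / configPRIO_BITS. */
        assert( ( 7UL - ulMaxPRIGROUPValue ) == 3UL );
        return 0;
    }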
*/ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -356,7 +359,7 @@ void xPortSysTickHandler( void ) #pragma WEAK( vPortSuppressTicksAndSleep ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -365,22 +368,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm( " cpsid i"); @@ -391,23 +378,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_interrupt() - * call above. */ + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ __asm( " cpsie i"); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. 
But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -436,8 +449,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ __asm( " cpsie i"); __asm( " dsb"); __asm( " isb"); @@ -457,27 +470,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -491,11 +500,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. 
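To make the accounting in the next few lines concrete, here is the same arithmetic with assumed values (illustration only, not something this patch adds): 10000 SysTick counts per tick, an expected idle time of 5 ticks, and 17500 decrements still outstanding when another interrupt ends the sleep.

    /* Illustrative arithmetic only - all values assumed. */
    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        const uint32_t ulTimerCountsForOneTick = 10000UL;  /* Assumed counts per tick. */
        const uint32_t xExpectedIdleTime = 5UL;            /* Assumed expected idle time, in ticks. */
        const uint32_t ulSysTickDecrementsLeft = 17500UL;  /* Assumed SYST_CVR value at wake-up. */

        /* Same expressions as the hunk that follows. */
        const uint32_t ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
        const uint32_t ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
        const uint32_t ulRemainderLoad = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;

        assert( ulCompletedSysTickDecrements == 32500UL ); /* Decrements actually slept. */
        assert( ulCompleteTickPeriods == 3UL );            /* Whole ticks passed to vTaskStepTick(). */
        assert( ulRemainderLoad == 7500UL );               /* Remainder of the partially elapsed tick. */
        return 0;
    }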
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -506,13 +534,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm( " cpsie i"); @@ -531,11 +585,11 @@ void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. 
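A worked example of the three constants computed just below, with clock rates assumed purely for illustration (not part of this change): an 80 MHz core, a 10 MHz SysTick reference clock and a 1 kHz tick.

    /* Illustrative arithmetic only - clock rates assumed. */
    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        const uint32_t ulExampleCpuClockHz = 80000000UL;      /* Assumed configCPU_CLOCK_HZ. */
        const uint32_t ulExampleSysTickClockHz = 10000000UL;  /* Assumed configSYSTICK_CLOCK_HZ. */
        const uint32_t ulExampleTickRateHz = 1000UL;          /* Assumed configTICK_RATE_HZ. */
        const uint32_t ulMax24BitNumber = 0xFFFFFFUL;         /* portMAX_24_BIT_NUMBER. */
        const uint32_t ulMissedCountsFactor = 94UL;           /* portMISSED_COUNTS_FACTOR after this change. */

        /* ulTimerCountsForOneTick: SysTick decrements per tick period. */
        assert( ( ulExampleSysTickClockHz / ulExampleTickRateHz ) == 10000UL );

        /* xMaximumPossibleSuppressedTicks: longest sleep the 24-bit counter can time. */
        assert( ( ulMax24BitNumber / 10000UL ) == 1677UL );

        /* ulStoppedTimerCompensation: counts assumed lost while the timer is stopped. */
        assert( ( ulMissedCountsFactor / ( ulExampleCpuClockHz / ulExampleSysTickClockHz ) ) == 11UL );
        return 0;
    }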
*/ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -544,7 +598,7 @@ void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c index c5036606bca..1028d83980f 100644 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -42,27 +42,18 @@ #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -95,12 +86,24 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. 
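For readers new to this option: an application that clocks the SysTick from a slower reference would define the symbol in FreeRTOSConfig.h. The fragment below is an assumed example only (a 32.768 kHz reference and a 48 MHz core), not something this patch adds.

    /* FreeRTOSConfig.h fragment - illustrative values only. */
    #define configCPU_CLOCK_HZ          ( 48000000UL )          /* Assumed core clock. */
    #define configSYSTICK_CLOCK_HZ      ( 32768UL )              /* Assumed SysTick reference clock (CLK bit 0). */
    #define configTICK_RATE_HZ          ( ( TickType_t ) 100 )   /* 327 SysTick counts per tick with the clocks above. */
    #define configUSE_TICKLESS_IDLE     1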
*/ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to @@ -232,66 +235,66 @@ static void prvTaskExitError( void ) BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -381,7 +384,7 @@ void xPortSysTickHandler( void ) #pragma WEAK( vPortSuppressTicksAndSleep ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -390,22 +393,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm( " cpsid i"); @@ -416,23 +403,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. 
*/ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_interrupt() - * call above. */ + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ __asm( " cpsie i"); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -461,8 +474,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ __asm( " cpsie i"); __asm( " dsb"); __asm( " isb"); @@ -482,27 +495,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. 
*/ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,11 +525,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -531,13 +559,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. 
*/ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm( " cpsie i"); @@ -556,11 +610,11 @@ void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -569,7 +623,7 @@ void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM0/port.c b/portable/GCC/ARM_CM0/port.c index 18d073fa118..19830730784 100644 --- a/portable/GCC/ARM_CM0/port.c +++ b/portable/GCC/ARM_CM0/port.c @@ -34,27 +34,19 @@ #include "FreeRTOS.h" #include "task.h" -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the NVIC. 
*/ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) @@ -69,7 +61,19 @@ * occurred while the SysTick counter is stopped during tickless idle * calculations. */ #ifndef portMISSED_COUNTS_FACTOR - #define portMISSED_COUNTS_FACTOR ( 45UL ) + #define portMISSED_COUNTS_FACTOR ( 94UL ) +#endif + +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) #endif /* Let the user override the pre-loading of the initial LR with the address of @@ -391,11 +395,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. */ @@ -404,7 +408,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. 
*/ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ @@ -412,7 +416,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -421,22 +425,6 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -447,23 +435,49 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
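A worked example of the reload calculation that follows, with assumed values (illustration only, not part of this change): 10000 counts per tick, 6400 decrements left in the current tick, an expected idle time of 4 ticks, a stopped-timer compensation of 11 counts, and a SysTick interrupt already pending when the timer was stopped.

    /* Illustrative arithmetic only - all values assumed. */
    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        const uint32_t ulTimerCountsForOneTick = 10000UL;  /* Assumed counts per tick. */
        const uint32_t ulStoppedTimerCompensation = 11UL;  /* Assumed compensation value. */
        const uint32_t xExpectedIdleTime = 4UL;            /* Assumed expected idle time, in ticks. */
        uint32_t ulReloadValue;

        /* Wait out the 6400 counts left in this tick plus three further whole ticks. */
        ulReloadValue = 6400UL + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
        assert( ulReloadValue == 36400UL );

        /* The tick interrupt is already pending, so the code clears it (suppressing
         * that tick) and drops one tick period because the next period is already
         * underway. */
        ulReloadValue -= ulTimerCountsForOneTick;
        assert( ulReloadValue == 26400UL );

        /* Finally subtract the stopped-timer fiddle factor. */
        ulReloadValue -= ulStoppedTimerCompensation;
        assert( ulReloadValue == 26389UL );
        return 0;
    }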
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -492,8 +506,8 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); @@ -513,27 +527,23 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. 
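Continuing the assumed numbers from the previous sketch (illustration only): if the tick interrupt ended the sleep, the counter wrapped, reloaded with 26389, and decremented 139 more times before being stopped, so the remainder of the new tick period is reprogrammed as shown a few lines above and passes the bounds check just below.

    /* Illustrative arithmetic only - all values assumed. */
    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        const uint32_t ulTimerCountsForOneTick = 10000UL;  /* Assumed counts per tick. */
        const uint32_t ulReloadValue = 26389UL;             /* Reload programmed at sleep entry (previous sketch). */
        const uint32_t ulCurrentValue = 26250UL;            /* Assumed SYST_CVR when the timer was stopped. */

        const uint32_t ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - ulCurrentValue );

        /* 9860 counts remain of the tick period that started at the wrap; the value
         * is neither tiny nor larger than one tick, so it is used as-is. */
        assert( ulCalculatedLoadValue == 9860UL );
        return 0;
    }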
*/ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -547,11 +557,30 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -562,15 +591,41 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. 
*/ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - /* Exit with interrpts enabled. */ + /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); } } diff --git a/portable/GCC/ARM_CM23/non_secure/port.c b/portable/GCC/ARM_CM23/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/GCC/ARM_CM23/non_secure/port.c +++ b/portable/GCC/ARM_CM23/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. 
*/ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. 
*/ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. - * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. 
Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. 
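The wake-up accounting above reduces to a couple of integer operations. Continuing the hypothetical numbers (ulTimerCountsForOneTick = 1000, xExpectedIdleTime = 5) for the case where an unrelated interrupt ends the sleep early:

    /* Suppose the current-value register reads 3200 when the SysTick is stopped.
     *   ulSysTickDecrementsLeft      = 3200
     *   ulCompletedSysTickDecrements = ( 5 * 1000 ) - 3200 = 1800
     *   ulCompleteTickPeriods        = 1800 / 1000         = 1    (integer division)
     *
     * The reload register is then primed with what is left of the partial tick:
     *   portNVIC_SYSTICK_LOAD_REG = ( ( 1 + 1 ) * 1000 ) - 1800 = 200
     *
     * and vTaskStepTick( 1 ) steps the kernel's tick count by the single whole
     * tick period that elapsed while sleeping; the remaining partial period
     * completes once SysTick is restarted. */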
*/ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. */ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. 
*/ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. 
*/ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. 
Interrupts are disabled diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. 
-1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. 
configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. - * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. 
*/ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. 
*/ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. */ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. 
*/ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. 
*/ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c index ac882e926c1..f41f215b32b 100644 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -41,27 +41,18 @@ #define configKERNEL_INTERRUPT_PRIORITY 255 #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. 
*/ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -89,12 +80,24 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* Let the user override the pre-loading of the initial LR with the address of * prvTaskExitError() in case it messes up unwinding of the stack in the * debugger. */ @@ -267,66 +270,66 @@ BaseType_t xPortStartScheduler( void ) configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. 
*/ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. 
*/ @@ -455,7 +458,7 @@ void xPortSysTickHandler( void ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -464,22 +467,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -490,23 +477,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. 
*/ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -535,8 +548,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); @@ -556,27 +569,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -590,11 +599,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. 
In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -605,13 +633,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -629,11 +683,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. 
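 *
 * For illustration only (hypothetical clock values): with
 * configCPU_CLOCK_HZ = configSYSTICK_CLOCK_HZ = 48000000 and
 * configTICK_RATE_HZ = 1000, the tickless-idle block below yields
 * ulTimerCountsForOneTick = 48000, xMaximumPossibleSuppressedTicks =
 * portMAX_24_BIT_NUMBER / 48000 = 349 ticks, and
 * ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / 1.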
*/ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -642,7 +696,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33/non_secure/port.c b/portable/GCC/ARM_CM33/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/GCC/ARM_CM33/non_secure/port.c +++ b/portable/GCC/ARM_CM33/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
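 *
 * For example (hypothetical values, not part of this change), a board
 * that drives the SysTick from a 32.768 kHz reference clock instead of
 * the core clock would add the following to its FreeRTOSConfig.h,
 * leaving the CLK bit clear so the reference clock remains selected:
 *
 *     #define configSYSTICK_CLOCK_HZ    32768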
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
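 *
 * For reference, the frame built below is, from the highest address
 * down: xPSR, PC, LR, R12, R3, R2, R1, R0 (pvParameters), R11..R4,
 * EXC_RETURN, then the task's CONTROL value when configENABLE_MPU == 1,
 * the PSPLIM value, and the xSecureContext value when
 * configENABLE_TRUSTZONE == 1.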
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c index ae05a32761e..c62d16049e4 100644 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -38,27 +38,18 @@ #error This port can only be used when the project options are configured to enable hardware floating point support. #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) /* Constants used to detect a Cortex-M7 r0p1 core, which should use the ARM_CM7 @@ -101,7 +92,19 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) + +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
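As an illustrative example (values assumed, not part of this patch): a design that drives SysTick from a 32768 Hz reference clock while the core runs at configCPU_CLOCK_HZ would add a definition of configSYSTICK_CLOCK_HZ equal to 32768 to FreeRTOSConfig.h, and the tickless-idle arithmetic below then uses that slower rate while the CLK bit is left clear.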
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif /* Let the user override the pre-loading of the initial LR with the address of * prvTaskExitError() in case it messes up unwinding of the stack in the @@ -303,66 +306,66 @@ BaseType_t xPortStartScheduler( void ) configASSERT( portCPUID != portCORTEX_M7_r0p0_ID ); #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. 
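As a worked example (hypothetical hardware, not taken from this patch): if writing 0xFF to the first user priority register reads back as 0xE0, the loop above shifts three times, so ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) equals 3 and this assertion expects __NVIC_PRIO_BITS to be 3 as well.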
*/ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -517,7 +520,7 @@ void xPortSysTickHandler( void ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -526,22 +529,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -552,23 +539,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. 
*/ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -597,8 +610,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); @@ -618,27 +631,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. 
Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -652,11 +661,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -667,13 +695,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. 
*/ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -691,11 +745,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -704,7 +758,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index 130e1878d6d..7bfaa1c3c32 100644 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -38,27 +38,18 @@ #error This port can only be used when the project options are configured to enable hardware floating point support. #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... 
*/ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -95,7 +86,19 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) + +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif /* Let the user override the pre-loading of the initial LR with the address of * prvTaskExitError() in case it messes up unwinding of the stack in the @@ -291,66 +294,66 @@ BaseType_t xPortStartScheduler( void ) configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. 
*/ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -507,7 +510,7 @@ void xPortSysTickHandler( void ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. 
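For instance (illustrative numbers only): with ulTimerCountsForOneTick equal to 1000, the 24-bit SysTick counter limits xMaximumPossibleSuppressedTicks to 0xFFFFFF / 1000 = 16777, so any larger expected idle time is clamped to that value here.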
*/ @@ -516,22 +519,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -542,23 +529,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. 
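As an illustrative example (assumed values): with 250 decrements left in the current tick, ulTimerCountsForOneTick of 1000 and xExpectedIdleTime of 4, ulReloadValue starts at 250 + 3 * 1000 = 3250; if the SysTick interrupt was already pending it is reduced by one tick period to 2250, and ulStoppedTimerCompensation is then subtracted before the value is written below.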
*/ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -587,8 +600,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); @@ -608,27 +621,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -642,11 +651,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). 
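As a worked example (assumed values): if xExpectedIdleTime was 5 ticks of 1000 counts each and 2600 decrements remain, then 5 * 1000 - 2600 = 2400 decrements completed, ulCompleteTickPeriods is 2, and the reload register is set below to ( 2 + 1 ) * 1000 - 2400 = 600 so the next tick interrupt fires when the partial third period finishes.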
*/ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -657,13 +685,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -681,11 +735,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -694,7 +748,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. 
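For example (illustrative rates): a 1 MHz configSYSTICK_CLOCK_HZ and a 1000 Hz configTICK_RATE_HZ give a reload value of 1000000 / 1000 - 1 = 999, i.e. one interrupt every 1000 SysTick counts.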
*/ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM0/port.c b/portable/IAR/ARM_CM0/port.c index 6fbc8529aa7..4e40442a1cb 100644 --- a/portable/IAR/ARM_CM0/port.c +++ b/portable/IAR/ARM_CM0/port.c @@ -37,26 +37,18 @@ #include "FreeRTOS.h" #include "task.h" -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the NVIC. */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) @@ -82,7 +74,7 @@ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; * occurred while the SysTick counter is stopped during tickless idle * calculations. */ #ifndef portMISSED_COUNTS_FACTOR - #define portMISSED_COUNTS_FACTOR ( 45UL ) + #define portMISSED_COUNTS_FACTOR ( 94UL ) #endif /* The number of SysTick increments that make up one tick period. */ @@ -102,6 +94,18 @@ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; static uint32_t ulStoppedTimerCompensation = 0; #endif /* configUSE_TICKLESS_IDLE */ +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to @@ -256,11 +260,11 @@ __weak void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. 
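As an illustrative example (assumed clock rates, not from this patch): with configSYSTICK_CLOCK_HZ at 1 MHz, configCPU_CLOCK_HZ at 8 MHz and configTICK_RATE_HZ at 1000, ulTimerCountsForOneTick is 1000, xMaximumPossibleSuppressedTicks is 0xFFFFFF / 1000 = 16777, and ulStoppedTimerCompensation is 94 / ( 8000000 / 1000000 ) = 11 SysTick counts after integer division.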
*/ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. */ @@ -269,7 +273,7 @@ __weak void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ @@ -277,7 +281,7 @@ __weak void vPortSetupTimerInterrupt( void ) __weak void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -286,22 +290,6 @@ __weak void vPortSetupTimerInterrupt( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __disable_interrupt(); @@ -312,23 +300,49 @@ __weak void vPortSetupTimerInterrupt( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_interrupt() + /* Re-enable interrupts - see comments above the __disable_interrupt() * call above. */ __enable_interrupt(); } else { + /* Stop the SysTick momentarily. 
The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -357,8 +371,8 @@ __weak void vPortSetupTimerInterrupt( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the __disable_interrupt() call above. */ __enable_interrupt(); __DSB(); __ISB(); @@ -378,27 +392,23 @@ __weak void vPortSetupTimerInterrupt( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. 
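As a worked example (assumed values): with ulTimerCountsForOneTick of 1000, ulReloadValue of 4400 and a current-value register reading of 4300, the SysTick has decremented 100 counts into the new period, so ulCalculatedLoadValue below becomes 999 - 100 = 899 and the next tick fires when the remainder of that period elapses.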
*/ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -412,11 +422,30 @@ __weak void vPortSetupTimerInterrupt( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -427,15 +456,41 @@ __weak void vPortSetupTimerInterrupt( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. 
*/ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - /* Exit with interrpts enabled. */ + /* Exit with interrupts enabled. */ __enable_interrupt(); } } diff --git a/portable/IAR/ARM_CM0/portmacro.h b/portable/IAR/ARM_CM0/portmacro.h index fc9eaa40673..56127dfa7a8 100644 --- a/portable/IAR/ARM_CM0/portmacro.h +++ b/portable/IAR/ARM_CM0/portmacro.h @@ -26,7 +26,6 @@ * */ - #ifndef PORTMACRO_H #define PORTMACRO_H @@ -80,8 +79,8 @@ /* Scheduler utilities. */ extern void vPortYield( void ); - #define portNVIC_INT_CTRL ( ( volatile uint32_t * ) 0xe000ed04 ) - #define portNVIC_PENDSVSET 0x10000000 + #define portNVIC_INT_CTRL ( ( volatile uint32_t * ) 0xe000ed04 ) + #define portNVIC_PENDSVSET 0x10000000 #define portYIELD() vPortYield() #define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) *( portNVIC_INT_CTRL ) = portNVIC_PENDSVSET #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) diff --git a/portable/IAR/ARM_CM23/non_secure/port.c b/portable/IAR/ARM_CM23/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/IAR/ARM_CM23/non_secure/port.c +++ b/portable/IAR/ARM_CM23/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. 
+ */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. 
*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. 
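To make the new reload-value arithmetic above concrete, here is a minimal standalone sketch of the same calculation. All numbers are hypothetical: they assume configSYSTICK_CLOCK_HZ == configCPU_CLOCK_HZ == 4 MHz and configTICK_RATE_HZ == 1000, so ulTimerCountsForOneTick is 4000 and ulStoppedTimerCompensation is portMISSED_COUNTS_FACTOR / 1 == 94. The snippet mirrors the port code but is not part of it:

    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        const uint32_t ulTimerCountsForOneTick = 4000UL;     /* 4000000 / 1000, hypothetical. */
        const uint32_t ulStoppedTimerCompensation = 94UL;    /* 94 / ( 4 MHz / 4 MHz ). */
        const uint32_t xExpectedIdleTime = 5UL;              /* Sleep for five tick periods. */

        /* Read from the current-value register: 1000 decrements were left in
         * the first tick period when the SysTick was stopped. */
        uint32_t ulSysTickDecrementsLeft = 1000UL;

        /* Remainder of the first tick plus four complete ticks. */
        uint32_t ulReloadValue = ulSysTickDecrementsLeft +
                                 ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
        assert( ulReloadValue == 17000UL );

        /* Compensate for the counts lost while the timer was stopped. */
        if( ulReloadValue > ulStoppedTimerCompensation )
        {
            ulReloadValue -= ulStoppedTimerCompensation;
        }
        assert( ulReloadValue == 16906UL );

        /* Had the SysTick interrupt already been pending when the timer was
         * stopped, the port code above would additionally clear the pending
         * bit and subtract one whole tick period (4000 counts), because that
         * first tick is suppressed. */
        return 0;
    }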
*/ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. - * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. 
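In the same spirit, the branch above that handles a wake-up caused by something other than the tick interrupt rounds the time actually slept down to whole tick periods (handed to vTaskStepTick()) and reloads the SysTick with whatever is left of the partially elapsed tick. A minimal sketch with the same hypothetical 4000-counts-per-tick figures:

    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        const uint32_t ulTimerCountsForOneTick = 4000UL;   /* Hypothetical, as before. */
        const uint32_t xExpectedIdleTime = 5UL;

        /* Suppose an external interrupt woke the core while 6500 decrements
         * remained before the expected idle time would have ended. */
        const uint32_t ulSysTickDecrementsLeft = 6500UL;

        /* Decrements that actually occurred during the sleep. */
        uint32_t ulCompletedSysTickDecrements =
            ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
        assert( ulCompletedSysTickDecrements == 13500UL );

        /* Whole tick periods that elapsed - the argument to vTaskStepTick(). */
        uint32_t ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
        assert( ulCompleteTickPeriods == 3UL );

        /* Reload value that completes the partially elapsed fourth tick. */
        uint32_t ulNewLoadValue = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick )
                                  - ulCompletedSysTickDecrements;
        assert( ulNewLoadValue == 2500UL );

        return 0;
    }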
*/ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. */ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. 
*/ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. 
*/ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. 
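For reference, the simulated exception frame that pxPortInitialiseStack() builds in the portPRELOAD_REGISTERS == 0 branch above is, reading directly from the code and from the highest address downwards:

    xPSR            portINITIAL_XPSR
    PC              pxCode
    LR              portTASK_RETURN_ADDRESS
    R12, R3-R1      left uninitialised in this branch
    R0              pvParameters
    R11-R4          left uninitialised in this branch
    EXC_RETURN      portINITIAL_EXC_RETURN
    CONTROL         only when configENABLE_MPU == 1 (privileged or unprivileged value)
    PSPLIM          pxEndOfStack
    xSecureContext  portNO_SECURE_CONTEXT, only when configENABLE_TRUSTZONE == 1

The portPRELOAD_REGISTERS branch that follows builds the same frame but fills R1-R12 with recognisable patterns instead of leaving them uninitialised.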
*/ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c index 92b211c1b82..f8e6a1b59c5 100644 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -41,27 +41,18 @@ #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -89,7 +80,7 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ @@ -102,6 +93,18 @@ #define configKERNEL_INTERRUPT_PRIORITY 255 #endif +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to @@ -216,66 +219,66 @@ BaseType_t xPortStartScheduler( void ) configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. 
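The configASSERT_DEFINED block above discovers how many interrupt priority bits the silicon actually implements by writing 0xFF to the first user priority register and checking which bits stick. A minimal standalone sketch of that arithmetic, assuming a hypothetical device that implements 3 priority bits (so the register reads back as 0xE0), with portMAX_PRIGROUP_BITS taken as 7, its value in this port:

    #include <assert.h>
    #include <stdint.h>

    #define portTOP_BIT_OF_BYTE      ( ( uint8_t ) 0x80 )
    #define portMAX_PRIGROUP_BITS    ( ( uint8_t ) 7 )

    int main( void )
    {
        /* With only 3 priority bits implemented, writing 0xFF to an NVIC
         * priority register reads back with only the top 3 bits set. */
        uint8_t ucMaxPriorityValue = 0xE0;
        uint32_t ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;

        /* Count the implemented bits exactly as xPortStartScheduler() does. */
        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
        {
            ulMaxPRIGROUPValue--;
            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
        }

        assert( ulMaxPRIGROUPValue == 4UL );

        /* 7 - 4 == 3 implemented priority bits, the figure the __NVIC_PRIO_BITS
         * and configPRIO_BITS configASSERT()s above cross-check. */
        assert( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == 3UL );

        return 0;
    }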
*/ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -358,7 +361,7 @@ void xPortSysTickHandler( void ) __weak void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -367,22 +370,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __disable_interrupt(); @@ -393,23 +380,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_interrupt() + /* Re-enable interrupts - see comments above the __disable_interrupt() * call above. 
*/ __enable_interrupt(); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -438,8 +451,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the __disable_interrupt() call above. */ __enable_interrupt(); __DSB(); __ISB(); @@ -459,27 +472,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. 
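/* Illustrative sketch (editor's note, not from the FreeRTOS sources): a worked example
 * of the reload-value arithmetic in the block above, using assumed numbers - 1000
 * SysTick counts per tick, an expected idle time of 5 ticks, and 250 counts left in the
 * current tick period. */
static uint32_t prvExampleReloadValue( void )
{
    const uint32_t ulCountsPerTick  = 1000UL;  /* assumed ulTimerCountsForOneTick */
    const uint32_t ulDecrementsLeft = 250UL;   /* assumed value read from the current-value register */
    const uint32_t ulExpectedIdle   = 5UL;     /* assumed xExpectedIdleTime, always at least 2 */

    /* 250 + ( 1000 * 4 ) = 4250 counts until the end of the requested idle period.
     * If the SysTick interrupt were already pending, one whole tick period would be
     * removed ( 4250 - 1000 = 3250 ), and the stopped-timer compensation is then
     * subtracted to account for counts lost while the counter was stopped. */
    return ulDecrementsLeft + ( ulCountsPerTick * ( ulExpectedIdle - 1UL ) );
}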
*/ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -493,11 +502,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -508,13 +536,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. 
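/* Illustrative sketch (editor's note, not from the FreeRTOS sources): when an interrupt
 * other than the tick ends the sleep, the elapsed time is rounded down to whole tick
 * periods as shown above. A worked example with assumed numbers - 1000 counts per tick,
 * 5 expected idle ticks, and 2750 counts still remaining at wake-up. */
static void prvExampleCompletedTicks( void )
{
    const uint32_t ulCountsPerTick  = 1000UL;   /* assumed ulTimerCountsForOneTick */
    const uint32_t ulExpectedIdle   = 5UL;      /* assumed xExpectedIdleTime */
    const uint32_t ulDecrementsLeft = 2750UL;   /* assumed counts remaining at wake-up */

    /* ( 5 * 1000 ) - 2750 = 2250 counts actually elapsed while sleeping. */
    uint32_t ulCompletedDecrements = ( ulExpectedIdle * ulCountsPerTick ) - ulDecrementsLeft;

    /* 2250 / 1000 = 2 complete tick periods - the value later passed to vTaskStepTick(). */
    uint32_t ulCompletePeriods = ulCompletedDecrements / ulCountsPerTick;

    /* The reload register is then primed with the remainder of the partial third
     * period: ( ( 2 + 1 ) * 1000 ) - 2250 = 750 counts. */
    uint32_t ulPartialPeriodReload = ( ( ulCompletePeriods + 1UL ) * ulCountsPerTick ) - ulCompletedDecrements;

    ( void ) ulPartialPeriodReload;
}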
Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __enable_interrupt(); @@ -532,11 +586,11 @@ __weak void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -545,7 +599,7 @@ __weak void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33/non_secure/port.c b/portable/IAR/ARM_CM33/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/IAR/ARM_CM33/non_secure/port.c +++ b/portable/IAR/ARM_CM33/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. 
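/* Illustrative sketch (editor's note, not from the FreeRTOS sources): the stopped-timer
 * compensation derived from the factor above scales with the ratio of the core clock to
 * the SysTick clock, so fewer SysTick counts are subtracted when the SysTick runs more
 * slowly than the core. Assumed clock rates: */
#define EXAMPLE_MISSED_COUNTS_FACTOR      ( 94UL )         /* value used by this patch */
#define EXAMPLE_CPU_CLOCK_HZ              ( 80000000UL )   /* assumed core clock */
#define EXAMPLE_SLOW_SYSTICK_CLOCK_HZ     ( 10000000UL )   /* assumed SysTick clock, core / 8 */
/* 94 / ( 80000000 / 10000000 ) = 11 SysTick counts of compensation. When the SysTick is
 * clocked from the core the divisor is 1 and the full 94 counts apply. */
#define EXAMPLE_STOPPED_TIMER_COMPENSATION \
    ( EXAMPLE_MISSED_COUNTS_FACTOR / ( EXAMPLE_CPU_CLOCK_HZ / EXAMPLE_SLOW_SYSTICK_CLOCK_HZ ) )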
*/ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. 
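/* Illustrative sketch (editor's note, not from the FreeRTOS sources): the new
 * configSYSTICK_CLOCK_HZ handling above lets an application clock the SysTick from a
 * reference other than the core clock. A minimal FreeRTOSConfig.h fragment, assuming a
 * part whose external SysTick reference is the core clock divided by eight - the clock
 * values and the divide-by-eight figure are examples only: */
#define configCPU_CLOCK_HZ        ( 80000000UL )                 /* assumed core clock */
#define configSYSTICK_CLOCK_HZ    ( configCPU_CLOCK_HZ / 8UL )   /* SysTick reference clock */
/* With configSYSTICK_CLOCK_HZ defined, portNVIC_SYSTICK_CLK_BIT_CONFIG evaluates to 0,
 * so the CLK bit is left clear and the SysTick counts the external reference instead of
 * the core clock. */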
*/ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. 
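/* Illustrative sketch (editor's note, not from the FreeRTOS sources): the zero-handling
 * of the current-value register above can be summarised by this small helper. The helper
 * name is hypothetical - the port performs the same check inline. */
static uint32_t prvExampleDecrementsLeft( uint32_t ulCurrentValueReg,
                                          uint32_t ulCountsPerTick )
{
    /* A current value of zero means the counter is about to reload, so a full tick
     * period of decrements actually remains - the interrupt is requested on the
     * 1 -> 0 transition, not on the reload itself. */
    return ( ulCurrentValueReg == 0UL ) ? ulCountsPerTick : ulCurrentValueReg;
}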
*/ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. - * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. 
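/* Illustrative sketch (editor's note, not from the FreeRTOS sources): a worked example
 * of the remaining-period calculation above, with assumed numbers - 1000 counts per
 * tick, a reload value of 3250, and 3100 counts remaining after the counter wrapped. */
static uint32_t prvExampleCalculatedLoadValue( void )
{
    const uint32_t ulCountsPerTick = 1000UL;   /* assumed ulTimerCountsForOneTick */
    const uint32_t ulReload        = 3250UL;   /* assumed ulReloadValue */
    const uint32_t ulCurrent       = 3100UL;   /* assumed current-value register */

    /* 3250 - 3100 = 150 counts of the new tick period have already elapsed, so
     * ( 1000 - 1 ) - 150 = 849 counts are programmed to finish it. The guard that
     * follows replaces implausible results (too small, or larger than a whole tick)
     * with a full tick period. */
    return ( ulCountsPerTick - 1UL ) - ( ulReload - ulCurrent );
}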
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. 
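/* Summary (editor's note, not from the FreeRTOS sources): in outline, the restart
 * sequence above when the SysTick normally uses a clock other than the core clock is:
 *   1. zero the current-value register and enable the counter with the CLK bit set, so
 *      the partial-period value already in the reload register is taken immediately
 *      rather than at the next edge of the slower reference clock;
 *   2. stop the counter again and, if the count flag shows the short partial period
 *      already expired, zero the current-value register so that period is not counted
 *      twice;
 *   3. write the standard one-tick reload value and re-enable the counter with its
 *      configured (non-core) clock source.
 * When the SysTick already runs from the core clock, only the reload register needs to
 * be rewritten with the standard value while the counter keeps running. */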
*/ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. */ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. 
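/* Illustrative sketch (editor's note, not from the FreeRTOS sources): the reload value
 * programmed by vPortSetupTimerInterrupt() above is one less than the number of counts
 * per tick because the counter counts from the reload value down to zero inclusive.
 * Assumed clock rates: */
#define EXAMPLE_TICK_SYSTICK_HZ    ( 4000000UL )   /* assumed configSYSTICK_CLOCK_HZ */
#define EXAMPLE_TICK_HZ            ( 1000UL )      /* assumed configTICK_RATE_HZ */
#define EXAMPLE_TICK_LOAD_VALUE    ( ( EXAMPLE_TICK_SYSTICK_HZ / EXAMPLE_TICK_HZ ) - 1UL ) /* 3999, i.e. 4000 counts per period */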
*/ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. 
*/ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. 
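/* Summary (editor's note, not from the FreeRTOS sources): with register pre-loading
 * enabled, the frame built above leaves the new task's stack laid out as follows,
 * highest address first:
 *
 *   xPSR, PC, LR, R12, R3, R2, R1, R0 ( pvParameters )   <- hardware-stacked frame
 *   R11 .. R4                                             <- software-saved registers
 *   EXC_RETURN                                            <- exception return value
 *   CONTROL            ( only when configENABLE_MPU == 1 )
 *   PSPLIM             ( pxEndOfStack )
 *   xSecureContext     ( only when configENABLE_TRUSTZONE == 1 )
 *
 * pxPortInitialiseStack() returns pxTopOfStack pointing at the last slot written, ready
 * for the first context restore. */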
*/ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c index a870d273b76..3c9e0065f7a 100644 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -45,27 +45,18 @@ #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) /* Constants used to detect a Cortex-M7 r0p1 core, which should use the ARM_CM7 @@ -104,12 +95,24 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to @@ -247,66 +250,66 @@ BaseType_t xPortStartScheduler( void ) configASSERT( portCPUID != portCORTEX_M7_r0p0_ID ); #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. 
*/ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -395,7 +398,7 @@ void xPortSysTickHandler( void ) __weak void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -404,22 +407,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __disable_interrupt(); @@ -430,23 +417,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_interrupt() + /* Re-enable interrupts - see comments above the __disable_interrupt() * call above. 
*/ __enable_interrupt(); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -475,8 +488,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the __disable_interrupt() call above. */ __enable_interrupt(); __DSB(); __ISB(); @@ -496,27 +509,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. 
*/ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -530,11 +539,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -545,13 +573,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. 
Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __enable_interrupt(); @@ -569,11 +623,11 @@ __weak void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -582,7 +636,7 @@ __weak void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c index 93ab626e52e..a7300b2f252 100644 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -45,27 +45,18 @@ #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. 
*/ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -98,12 +89,24 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to @@ -235,66 +238,66 @@ BaseType_t xPortStartScheduler( void ) configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. 
*/ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -383,7 +386,7 @@ void xPortSysTickHandler( void ) __weak void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -392,22 +395,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. 
The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __disable_interrupt(); @@ -418,23 +405,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_interrupt() + /* Re-enable interrupts - see comments above the __disable_interrupt() * call above. */ __enable_interrupt(); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. 
*/ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -463,8 +476,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the __disable_interrupt() call above. */ __enable_interrupt(); __DSB(); __ISB(); @@ -484,27 +497,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -518,11 +527,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). 
*/ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -533,13 +561,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __enable_interrupt(); @@ -557,11 +611,11 @@ __weak void vPortSetupTimerInterrupt( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -570,7 +624,7 @@ __weak void vPortSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. 
*/ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } /*-----------------------------------------------------------*/ diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c index 028140572ad..a8e7e7ca813 100644 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -34,28 +34,18 @@ #include "FreeRTOS.h" #include "task.h" - -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -88,7 +78,19 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) + +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif /* Let the user override the pre-loading of the initial LR with the address of * prvTaskExitError() in case it messes up unwinding of the stack in the @@ -298,70 +300,70 @@ BaseType_t xPortStartScheduler( void ) configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. 
- * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); + /* The kernel interrupt priority should be set to the lowest + * priority. */ + configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -510,7 +512,7 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -519,22 +521,6 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm { @@ -551,18 +537,7 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm { "cpsie i" @@ -570,6 +545,43 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO } else { + /* Stop the SysTick momentarily. 
The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -604,8 +616,8 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ __asm { "cpsie i" }; @@ -637,27 +649,23 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. 
*/ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -671,11 +679,30 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -686,13 +713,39 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. 
*/ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm { @@ -714,11 +767,11 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Reset SysTick. */ @@ -727,7 +780,7 @@ void xPortSysTickHandler( void ) iv IVT_INT_SysTick ics ICS_AUTO /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } #endif /* configOVERRIDE_DEFAULT_TICK_CONFIGURATION */ diff --git a/portable/RVDS/ARM_CM0/port.c b/portable/RVDS/ARM_CM0/port.c index bb6dfcd31ca..50cd9ba3c34 100644 --- a/portable/RVDS/ARM_CM0/port.c +++ b/portable/RVDS/ARM_CM0/port.c @@ -45,6 +45,8 @@ #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) @@ -59,12 +61,24 @@ * occurred while the SysTick counter is stopped during tickless idle * calculations. */ #ifndef portMISSED_COUNTS_FACTOR - #define portMISSED_COUNTS_FACTOR ( 45UL ) + #define portMISSED_COUNTS_FACTOR ( 94UL ) #endif /* Constants used with memory barrier intrinsics. */ #define portSY_FULL_READ_WRITE ( 15 ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. 
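+     * (Editorial note, not part of the original patch: if the application does
+     * define configSYSTICK_CLOCK_HZ, portNVIC_SYSTICK_CLK_BIT_CONFIG becomes 0
+     * and the tickless-idle constants computed later in this file scale with
+     * the SysTick clock.  For example, with an assumed configCPU_CLOCK_HZ of
+     * 80 MHz, configSYSTICK_CLOCK_HZ of 10 MHz and configTICK_RATE_HZ of 1000:
+     * ulTimerCountsForOneTick = 10000000 / 1000 = 10000,
+     * xMaximumPossibleSuppressedTicks = 0xFFFFFF / 10000 = 1677, and
+     * ulStoppedTimerCompensation = 94 / ( 80000000 / 10000000 ) = 11.)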
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* Legacy macro for backward compatibility only. This macro used to be used to * replace the function that configures the clock used to generate the tick * interrupt (prvSetupTimerInterrupt()), but now the function is declared weak so @@ -360,18 +374,20 @@ void xPortSysTickHandler( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - ulTimerCountsForOneTick = ( configCPU_CLOCK_HZ / configTICK_RATE_HZ ); + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ - /* Stop and reset the SysTick. */ + /* Stop and clear the SysTick. */ portNVIC_SYSTICK_CTRL_REG = 0UL; portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; /* Configure SysTick to interrupt at the requested rate. */ - portNVIC_SYSTICK_LOAD_REG = ( configCPU_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } #endif /* configOVERRIDE_DEFAULT_TICK_CONFIGURATION */ @@ -381,7 +397,7 @@ void xPortSysTickHandler( void ) __weak void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -390,22 +406,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __disable_irq(); @@ -416,23 +416,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. 
*/ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_irq() call - * above. */ + /* Re-enable interrupts - see comments above the __disable_irq() + * call above. */ __enable_irq(); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -443,10 +469,10 @@ void xPortSysTickHandler( void ) /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can * set its parameter to 0 to indicate that its implementation contains * its own wait for interrupt or wait for event instruction, and so wfi - * should not be executed again. However, the original expected idle + * should not be executed again. However, the original expected idle * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -461,8 +487,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the __disable_irq() call above. 
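+             * (Editorial note, not part of the original patch: a worked example
+             * with assumed values.  If ulTimerCountsForOneTick = 1000, the
+             * current-value register reads 600, and xExpectedIdleTime is 5 ticks,
+             * then ulReloadValue = 600 + ( 1000 * ( 5 - 1 ) ) = 4600 counts.  If
+             * the SysTick interrupt is already pending, one whole period is
+             * subtracted, giving 3600; finally ulStoppedTimerCompensation (94
+             * with the default factor and a 1:1 clock ratio) is subtracted,
+             * leaving 3506 counts until the SysTick interrupt that ends the
+             * idle period.)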
*/ __enable_irq(); __dsb( portSY_FULL_READ_WRITE ); __isb( portSY_FULL_READ_WRITE ); @@ -482,27 +508,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,11 +538,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? 
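                 * (Editorial note, not part of the original patch, reusing the
                 * assumed numbers from the notes above: if another interrupt ends
                 * the sleep with 2750 counts still remaining out of the ideal
                 * 5-tick window of 5 * 1000 = 5000 counts, then
                 * ulCompletedSysTickDecrements = 5000 - 2750 = 2250, two complete
                 * tick periods elapsed ( 2250 / 1000 = 2 ), and the reload register
                 * below is set to ( ( 2 + 1 ) * 1000 ) - 2250 = 750, the remainder
                 * of the partially elapsed third period, before vTaskStepTick()
                 * credits the two whole periods.)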
*/ @@ -531,15 +572,41 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD - * again, then set portNVIC_SYSTICK_LOAD back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - /* Exit with interrpts enabled. */ + /* Exit with interrupts enabled. */ __enable_irq(); } } diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c index 6b5efb27e93..b47d3b7e3e3 100644 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -42,17 +42,6 @@ #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Legacy macro for backward compatibility only. This macro used to be used to * replace the function that configures the clock used to generate the tick * interrupt (prvSetupTimerInterrupt()), but now the function is declared weak so @@ -68,10 +57,12 @@ #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. 
*/ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -99,12 +90,24 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to @@ -262,70 +265,70 @@ __asm void prvStartFirstTask( void ) BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. 
*/ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); + /* The kernel interrupt priority should be set to the lowest + * priority. */ + configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. 
*/ @@ -450,7 +453,7 @@ void xPortSysTickHandler( void ) __weak void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -459,22 +462,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __disable_irq(); @@ -485,23 +472,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_irq() call - * above. */ + /* Re-enable interrupts - see comments above the __disable_irq() + * call above. */ __enable_irq(); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. 
*/ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -530,8 +543,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the __disable_irq() call above. */ __enable_irq(); __dsb( portSY_FULL_READ_WRITE ); __isb( portSY_FULL_READ_WRITE ); @@ -551,27 +564,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -585,11 +594,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. 
In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -600,13 +628,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __enable_irq(); @@ -627,11 +681,11 @@ void xPortSysTickHandler( void ) { /* Calculate the constants required to configure the tick interrupt. 
*/ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -640,7 +694,7 @@ void xPortSysTickHandler( void ) /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } #endif /* configOVERRIDE_DEFAULT_TICK_CONFIGURATION */ diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c index 443ec3e27ea..40e3ac958db 100644 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -42,17 +42,6 @@ #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* Legacy macro for backward compatibility only. This macro used to be used to * replace the function that configures the clock used to generate the tick * interrupt (prvSetupTimerInterrupt()), but now the function is declared weak so @@ -68,10 +57,12 @@ #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) /* Constants used to detect a Cortex-M7 r0p1 core, which should use the ARM_CM7 @@ -110,12 +101,24 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. 
*/ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. The implementation in this * file is weak to allow application writers to change the timer used to @@ -324,70 +327,70 @@ BaseType_t xPortStartScheduler( void ) configASSERT( portCPUID != portCORTEX_M7_r0p0_ID ); #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); + /* The kernel interrupt priority should be set to the lowest + * priority. */ + configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -542,7 +545,7 @@ void xPortSysTickHandler( void ) __weak void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -551,22 +554,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. 
*/ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __disable_irq(); @@ -577,23 +564,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_irq() call - * above. */ + /* Re-enable interrupts - see comments above the __disable_irq() + * call above. */ __enable_irq(); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -622,8 +635,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the __disable_irq() call above. 
*/ __enable_irq(); __dsb( portSY_FULL_READ_WRITE ); __isb( portSY_FULL_READ_WRITE ); @@ -643,27 +656,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -677,11 +686,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? 
*/ @@ -692,13 +720,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __enable_irq(); @@ -719,11 +773,11 @@ void xPortSysTickHandler( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -732,7 +786,7 @@ void xPortSysTickHandler( void ) /* Configure SysTick to interrupt at the requested rate. 
*/ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } #endif /* configOVERRIDE_DEFAULT_TICK_CONFIGURATION */ diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c index 910214c1f6e..fe869df440d 100644 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -42,17 +42,6 @@ #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the same - * as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif - /* The __weak attribute does not work as you might expect with the Keil tools * so the configOVERRIDE_DEFAULT_TICK_CONFIGURATION constant must be set to 1 if * the application writer wants to provide their own implementation of @@ -68,10 +57,12 @@ #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* ...then bits in the registers. */ +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) #define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) #define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) @@ -104,12 +95,24 @@ /* A fiddle factor to estimate the number of SysTick counts that would have * occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /* * Setup the timer to generate the tick interrupts. 
The implementation in this * file is weak to allow application writers to change the timer used to @@ -308,70 +311,70 @@ __asm void prvEnableVFP( void ) BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); + /* The kernel interrupt priority should be set to the lowest + * priority. */ + configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; - } + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -528,7 +531,7 @@ void xPortSysTickHandler( void ) __weak void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -537,22 +540,6 @@ void xPortSysTickHandler( void ) xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for - * is accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __disable_irq(); @@ -563,23 +550,49 @@ void xPortSysTickHandler( void ) * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. 
*/ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above __disable_irq() call - * above. */ + /* Re-enable interrupts - see comments above the __disable_irq() + * call above. */ __enable_irq(); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -608,8 +621,8 @@ void xPortSysTickHandler( void ) configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. see comments above - * __disable_interrupt() call above. */ + * out of sleep mode to execute immediately. See comments above + * the __disable_irq() call above. */ __enable_irq(); __dsb( portSY_FULL_READ_WRITE ); __isb( portSY_FULL_READ_WRITE ); @@ -629,27 +642,23 @@ void xPortSysTickHandler( void ) * be, but using the tickless mode will inevitably result in some tiny * drift of the time maintained by the kernel with respect to calendar * time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is yet - * to count to zero (in which case an interrupt other than the SysTick - * must have brought the system out of sleep mode). */ + /* Determine whether the SysTick has already counted to zero. 
*/ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -663,11 +672,30 @@ void xPortSysTickHandler( void ) } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -678,13 +706,39 @@ void xPortSysTickHandler( void ) portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. 
*/ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __enable_irq(); @@ -705,11 +759,11 @@ void xPortSysTickHandler( void ) { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and clear the SysTick. */ @@ -718,7 +772,7 @@ void xPortSysTickHandler( void ) /* Configure SysTick to interrupt at the requested rate. 
*/ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); } #endif /* configOVERRIDE_DEFAULT_TICK_CONFIGURATION */ From 9965dcd79f7c48597b8369d73a4f588aa525fb35 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 6 Oct 2022 13:04:20 +0800 Subject: [PATCH 101/164] Merge SMP commit 45dd83a8e * 45dd83a8e | 2022-06-09 | Fix RP2040 assertion due to yield spin lock info being wrongly shared between multiple cores (#501) --- portable/ThirdParty/GCC/RP2040/port.c | 44 ++++++++++++--------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index f52b0d8d0a7..0022fcbf3f0 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -133,8 +133,8 @@ static UBaseType_t uxCriticalNesting; static spin_lock_t * pxCrossCoreSpinLock; #endif /* LIB_PICO_MULTICORE */ - static spin_lock_t * pxYieldSpinLocks[ configNUM_CORES ]; - static uint32_t ulYieldSpinLockSaveValue; + static spin_lock_t * pxYieldSpinLock[ configNUM_CORES ]; + static uint32_t ulYieldSpinLockSaveValue[ configNUM_CORES ]; #endif /* configSUPPORT_PICO_SYNC_INTEROP */ /* @@ -452,7 +452,7 @@ void vPortYield( void ) * * This should only be checked with interrupt disabled in SMP. */ - configASSERT( pxYieldSpinLocks[ get_core_num() ] == NULL ); + configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL ); #endif /* configSUPPORT_PICO_SYNC_INTEROP */ /* Set a PendSV to request a context switch. */ @@ -485,16 +485,15 @@ void vPortExitCritical( void ) } } -void vPortEnableInterrupts() { +void vPortEnableInterrupts( void ) +{ #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) + int xCoreID = portGET_CORE_ID(); + if( pxYieldSpinLock[xCoreID] ) { - BaseType_t xCoreID = get_core_num(); - spin_lock_t * pxYieldSpinLock = pxYieldSpinLocks[ xCoreID ]; - if( pxYieldSpinLocks[ xCoreID ] ) - { - pxYieldSpinLocks[ xCoreID ] = NULL; - spin_unlock( pxYieldSpinLock, ulYieldSpinLockSaveValue ); - } + spin_lock_t* const pxTmpLock = pxYieldSpinLock[xCoreID]; + pxYieldSpinLock[xCoreID] = NULL; + spin_unlock( pxTmpLock, ulYieldSpinLockSaveValue[xCoreID] ); } #endif __asm volatile ( " cpsie i " ::: "memory" ); @@ -993,17 +992,18 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } else { - BaseType_t xCoreID = get_core_num(); - configASSERT( pxYieldSpinLocks[ xCoreID ] == NULL ); + configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL ); // we want to hold the lock until the event bits have been set; since interrupts are currently disabled // by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when // the scheduler is unlocked during this call configASSERT(pxLock->spin_lock); - pxYieldSpinLocks[ xCoreID ] = pxLock->spin_lock; - ulYieldSpinLockSaveValue = ulSave; + int xCoreID = portGET_CORE_ID(); + pxYieldSpinLock[xCoreID] = pxLock->spin_lock; + ulYieldSpinLockSaveValue[xCoreID] = ulSave; xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit(pxLock->spin_lock), pdTRUE, pdFALSE, portMAX_DELAY); + } } @@ -1061,8 +1061,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } else { - BaseType_t xCoreID = get_core_num(); - configASSERT( pxYieldSpinLocks[ xCoreID ] == NULL ); + configASSERT( pxYieldSpinLock[ 
portGET_CORE_ID() ] == NULL ); TickType_t uxTicksToWait = prvGetTicksToWaitBefore( uxUntil ); if( uxTicksToWait ) @@ -1071,17 +1070,12 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) * by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when * the scheduler is unlocked during this call */ configASSERT(pxLock->spin_lock); - pxYieldSpinLocks[ xCoreID ] = pxLock->spin_lock; - ulYieldSpinLockSaveValue = ulSave; + int xCoreID = portGET_CORE_ID(); + pxYieldSpinLock[xCoreID] = pxLock->spin_lock; + ulYieldSpinLockSaveValue[xCoreID] = ulSave; xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit(pxLock->spin_lock), pdTRUE, pdFALSE, uxTicksToWait ); - - #if ( configNUM_CORES == 1 ) - /* sanity check that interrupts were disabled, then re-enabled during the call, which will have - * taken care of the yield. This should be checked with interrupt were disabled in SMP. */ - configASSERT( pxYieldSpinLocks[ xCoreID ] == NULL ); - #endif } else { From 8294974d5deeeda54607acc6602fce34d4cc0de6 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 6 Oct 2022 13:12:56 +0800 Subject: [PATCH 102/164] Merge SMP b87dfa3e9 * b87dfa3e9 | 2022-06-04 | RP2040: Allow FreeRTOS to be added to the parent CMake project post initialization of the Pico SDK --- portable/ThirdParty/GCC/RP2040/README.md | 13 +++++++------ portable/ThirdParty/GCC/RP2040/library.cmake | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/README.md b/portable/ThirdParty/GCC/RP2040/README.md index c50cb4e388c..0583fac0bfd 100644 --- a/portable/ThirdParty/GCC/RP2040/README.md +++ b/portable/ThirdParty/GCC/RP2040/README.md @@ -1,12 +1,13 @@ ## Overview -This directory provides a FreeRTOS-Kernel port that can be used with the Raspberry Pi Pico SDK. It supports: +This directory provides an SMP FreeRTOS-Kernel port that can be used with the Raspberry Pi Pico SDK. It supports: * Simple CMake INTERFACE libraries, to provide the FreeRTOS-Kernel and also the individual allocator types, without copying code into the user's project. - * Running the FreeRTOS-Kernel and tasks on either core 0 or core 1 - * Use of SDK synchronization primitives (such as mutexes, semaphores, queues from pico_sync) between FreeRTOS tasks and code executing on the other core, or in IRQ handlers. + * Running the FreeRTOS-Kernel and tasks on either core 0 or core 1, or both. + * Use of SDK synchronization primitives (such as mutexes, semaphores, queues from pico_sync) between FreeRTOS tasks and code executing on a non FreeRTOS core, or in IRQ handlers. -Note that a FreeRTOS SMP version of this port is also available in the FreeRTOS-Kernel smp branch, which additionally supports utilizing both RP2040 CPU cores for FreeRTOS tasks simultaneously. +Note that whilst this SMP version can be run on just a single (either) core, it is probably +more efficient to use the non SMP version in the main FreeRTOS-Kernel branch in that case. 
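When both cores run FreeRTOS tasks, individual tasks can optionally be pinned to one core. The sketch below is illustrative only: it assumes a `FreeRTOSConfig.h` with `configNUM_CORES` set to 2 and `configUSE_CORE_AFFINITY` set to 1, and uses the core-affinity API added elsewhere in this patch series; `prvCore0Task`, `prvCore1Task` and `vStartPinnedTasks` are placeholder names.

```c
#include "FreeRTOS.h"
#include "task.h"

static void prvCore0Task( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Work that should stay on core 0. */
    }
}

static void prvCore1Task( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Work that should stay on core 1. */
    }
}

void vStartPinnedTasks( void )
{
    TaskHandle_t xHandle0 = NULL;
    TaskHandle_t xHandle1 = NULL;

    xTaskCreate( prvCore0Task, "core0", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, &xHandle0 );
    xTaskCreate( prvCore1Task, "core1", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, &xHandle1 );

    /* Bit n of the affinity mask allows the task to run on core n. */
    vTaskCoreAffinitySet( xHandle0, ( UBaseType_t ) ( 1 << 0 ) );
    vTaskCoreAffinitySet( xHandle1, ( UBaseType_t ) ( 1 << 1 ) );

    vTaskStartScheduler();
}
```

Tasks created without an explicit affinity default to `tskNO_AFFINITY` and may be scheduled on either core.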
## Using this port @@ -14,7 +15,7 @@ You can copy [FreeRTOS-Kernel-import.cmake](FreeRTOS-Kernel-import.cmake) into y add the following in your `CMakeLists.txt`: ```cmake -include(FreeRTOS_Kernel_import.cmake) +import(FreeRTOS_Kernel_import.cmake) ``` This will locate the FreeRTOS kernel if it is a direct sub-module of your project, or if you provide the @@ -39,4 +40,4 @@ Some additional `config` options are defined [here](include/rp2040_config.h) whi ## Known Limitations -- Tickless idle has not currently been tested, and is likely non-functional +- Tickless idle has not currently been tested, and is likely non-functional \ No newline at end of file diff --git a/portable/ThirdParty/GCC/RP2040/library.cmake b/portable/ThirdParty/GCC/RP2040/library.cmake index 6b125fab8b8..a562a144cc6 100644 --- a/portable/ThirdParty/GCC/RP2040/library.cmake +++ b/portable/ThirdParty/GCC/RP2040/library.cmake @@ -27,8 +27,7 @@ target_sources(FreeRTOS-Kernel INTERFACE ) target_include_directories(FreeRTOS-Kernel INTERFACE - ${CMAKE_CURRENT_LIST_DIR}/include - ${FREERTOS_CONFIG_FILE_DIRECTORY}) + ${CMAKE_CURRENT_LIST_DIR}/include) target_link_libraries(FreeRTOS-Kernel INTERFACE FreeRTOS-Kernel-Core @@ -40,6 +39,7 @@ target_link_libraries(FreeRTOS-Kernel INTERFACE target_compile_definitions(FreeRTOS-Kernel INTERFACE LIB_FREERTOS_KERNEL=1 + FREE_RTOS_KERNEL_SMP=1 ) add_library(FreeRTOS-Kernel-Static INTERFACE) From b9dfba328af24b3d25fbcd1127c6e9ed268c7f23 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 6 Oct 2022 18:45:09 +0800 Subject: [PATCH 103/164] Merge SMP 13f034eb7 * 13f034eb7 | 2022-06-24 | RP2040: Fix compiler warning and comment (#509) --- portable/ThirdParty/GCC/RP2040/port.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index 0022fcbf3f0..251e6159ec8 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -128,10 +128,10 @@ static UBaseType_t uxCriticalNesting; #define pEventGroup (&xStaticEventGroup) #endif /* configSUPPORT_STATIC_ALLOCATION */ static EventGroupHandle_t xEventGroup; - #if ( LIB_PICO_MULTICORE == 1 ) + #if ( portRUNNING_ON_BOTH_CORES == 0 ) static EventBits_t uxCrossCoreEventBits; static spin_lock_t * pxCrossCoreSpinLock; - #endif /* LIB_PICO_MULTICORE */ + #endif static spin_lock_t * pxYieldSpinLock[ configNUM_CORES ]; static uint32_t ulYieldSpinLockSaveValue[ configNUM_CORES ]; @@ -446,12 +446,9 @@ void vPortEndScheduler( void ) void vPortYield( void ) { - #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) && ( confgNUM_CORES == 1 ) + #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) /* We are not in an ISR, and pxYieldSpinLock is always dealt with and - * cleared interrupts are re-enabled, so should be NULL. - * - * This should only be checked with interrupt disabled in SMP. 
- */ + * cleared when interrupts are re-enabled, so should be NULL */ configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL ); #endif /* configSUPPORT_PICO_SYNC_INTEROP */ @@ -985,6 +982,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock, uint32_t ulSave ) { configASSERT( !portCHECK_IF_IN_ISR() ); + // note no need to check LIB_PICO_MULTICORE, as this is always returns true if that is not defined if( !portIS_FREE_RTOS_CORE() ) { spin_unlock(pxLock->spin_lock, ulSave ); @@ -1029,7 +1027,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) else { __sev(); - #if ( LIB_PICO_MULTICORE == 1) + #if ( portRUNNING_ON_BOTH_CORES == 0 ) /* We could sent the bits across the FIFO which would have required us to block here if the FIFO was full, * or we could have just set all bits on the other side, however it seems reasonable instead to take * the hit of another spin lock to protect an accurate bit set. */ @@ -1045,7 +1043,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } /* This causes fifo irq on the other (FreeRTOS) core which will do the set the event bits */ sio_hw->fifo_wr = 0; - #endif /* LIB_PICO_MULTICORE */ + #endif /* portRUNNING_ON_BOTH_CORES == 0 */ spin_unlock(pxLock->spin_lock, ulSave); } } @@ -1061,6 +1059,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } else { + configASSERT( portIS_FREE_RTOS_CORE() ); configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL ); TickType_t uxTicksToWait = prvGetTicksToWaitBefore( uxUntil ); @@ -1101,7 +1100,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) { /* This must be done even before the scheduler is started, as the spin lock * is used by the overrides of the SDK wait/notify primitives */ - #if ( LIB_PICO_MULTICORE == 1 ) + #if ( portRUNNING_ON_BOTH_CORES == 0 ) pxCrossCoreSpinLock = spin_lock_instance( next_striped_spin_lock_num() ); #endif /* portRUNNING_ON_BOTH_CORES */ From 136385a3cab53850ea9bb85ec1d8a3686eb7398d Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Fri, 7 Oct 2022 10:17:27 +0800 Subject: [PATCH 104/164] Fix compiler warning and spelling --- .github/lexicon.txt | 28 ++ include/FreeRTOS.h | 30 +- include/task.h | 289 ++++++++-------- include/timers.h | 1 + tasks.c | 789 +++++++++++++++++++++++++------------------- timers.c | 182 +++++----- 6 files changed, 728 insertions(+), 591 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 6e04c246c3c..b815e2faca5 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1097,6 +1097,7 @@ mikroc min mingw minilistitem +minimalidle mips misadd misc @@ -1547,6 +1548,7 @@ prvtickcount prvtimercallback prvwritebytestobuffer prvwritemessagetobuffer +prvyieldfortask prvyieldhandler ps psp @@ -1763,6 +1765,7 @@ rdc rdr rdrf rdy +reacquisition readbit readme readvalue @@ -2405,6 +2408,7 @@ uxbitstoset uxbitstowait uxbitstowaitfor uxcontrolbits +uxcoreaffinitymask uxcriticalnesting uxcurrenteventbits uxcurrentnumberoftasks @@ -2428,9 +2432,12 @@ uxlength uxlistremove uxmaxcount uxmessageswaiting +uxnetworkingcoreaffinitymask uxnewpriority uxoriginalpriority +uxpendedcounts uxportcomparesetextram +uxprevschedulersuspended uxpriority uxprioritytouse uxqueue @@ -2480,6 +2487,7 @@ vapplicationgettimertaskmemory vapplicationidlehook vapplicationirqhandler vapplicationmallocfailedhook +vapplicationminimalidlehook vapplicationsetuptickinterrupt vapplicationsetupticktimerinterrupt 
vapplicationsetuptimerinterrupt @@ -2582,12 +2590,15 @@ vstreambufferdelete vtask vtaskallocatempuregions vtaskcode +vtaskcoreaffinityget +vtaskcoreaffinityset vtaskdelay vtaskdelayuntil vtaskdelete vtaskendscheduler vtaskentercritical vtaskexitcritical +vtaskexitcriticalfromisr vtaskfunction vtaskgetinfo vtaskgetruntimestats @@ -2596,6 +2607,8 @@ vtasknotify vtasknotifygivefromisr vtasknotifygiveindexedfromisr vtaskplaceoneventlist +vtaskpreemptiondisable +vtaskpreemptionenable vtaskpriorityset vtaskremovefromunorderedeventlist vtaskresume @@ -2609,6 +2622,7 @@ vtasksteptick vtasksuspend vtasksuspendall vtaskswitchcontext +vtaskswitchcontextforcore vtaskusesdpfpu vtickisr vtimercallback @@ -2706,6 +2720,8 @@ xcommandtime xcommsrxqueue xconsttickcount xcopyposition +xcore +xcoreid xcoroutinecreate xcoroutinepreviouslywoken xcoroutinequeue @@ -2787,6 +2803,8 @@ xhigherpriorittaskwoken xhigherprioritytaskwoken xhigherprioritytaskwokenbypost xidletaskhandle +xidletaskhandles +xidletcbbuffers xilinx xindex xinheritanceoccurred @@ -2806,6 +2824,8 @@ xlastwaketime xlength xlist xlistend +xlowestpriority +xlowestprioritycore xmair xmaxcount xmaxexpirycountbeforestopping @@ -2843,6 +2863,7 @@ xmutexbuffer xmutexholder xn xnearstartscheduler +xnetworkingtaskhandle xnewperiod xnewqueue xnextexpiretime @@ -2875,6 +2896,7 @@ xportregistercinterrupthandler xportregisterdump xportstartfirsttask xportstartscheduler +xpreemptiondisable xpsr xqueue xqueueaddtoset @@ -2985,6 +3007,7 @@ xtail xtal xtask xtaskabortdelay +xtaskattribute xtaskbuffer xtaskcallapplicationtaskhook xtaskcatchupticks @@ -3030,6 +3053,7 @@ xtasknumber xtaskremovefromeventlist xtaskresumeall xtaskresumefromisr +xtaskrunstate xtaskswaitingforbits xtaskswaitingtermination xtaskswaitingtoreceive @@ -3066,6 +3090,7 @@ xtimercreate xtimercreated xtimercreatestatic xtimerdelete +xtimergenericcommand xtimergetexpirytime xtimergetperiod xtimergetreloadmode @@ -3100,5 +3125,8 @@ xwantedsize xwasdelayed xwritevalue xxr +xyieldfortask xyieldpending +xyieldpendings xzr +yeilding diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index fa7dac347ba..698beaaeb44 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -365,7 +365,7 @@ #ifndef portGET_CORE_ID #if ( configNUM_CORES == 1 ) - #define portGET_CORE_ID() 0 + #define portGET_CORE_ID() 0 #else #error configNUM_CORES is set to more than 1 then portGET_CORE_ID must also be defined. #endif /* configNUM_CORES */ @@ -375,7 +375,7 @@ #ifndef portYIELD_CORE #if ( configNUM_CORES == 1 ) - #define portYIELD_CORE( x ) portYIELD() + #define portYIELD_CORE( x ) portYIELD() #else #error configNUM_CORES is set to more than 1 then portYIELD_CORE must also be defined. 
#endif /* configNUM_CORES */ @@ -388,7 +388,7 @@ #error portSET_INTERRUPT_MASK is required in SMP #endif -#endif /* portSET_INTERRUPT_MASK */ +#endif /* portSET_INTERRUPT_MASK */ #ifndef portCLEAR_INTERRUPT_MASK @@ -396,7 +396,7 @@ #error portCLEAR_INTERRUPT_MASK is required in SMP #endif -#endif /* portCLEAR_INTERRUPT_MASK */ +#endif /* portCLEAR_INTERRUPT_MASK */ #ifndef portRELEASE_TASK_LOCK @@ -406,7 +406,7 @@ #error portRELEASE_TASK_LOCK is required in SMP #endif -#endif /* portRELEASE_TASK_LOCK */ +#endif /* portRELEASE_TASK_LOCK */ #ifndef portGET_TASK_LOCK @@ -416,7 +416,7 @@ #error portGET_TASK_LOCK is required in SMP #endif -#endif /* portGET_TASK_LOCK */ +#endif /* portGET_TASK_LOCK */ #ifndef portRELEASE_ISR_LOCK @@ -426,7 +426,7 @@ #error portRELEASE_ISR_LOCK is required in SMP #endif -#endif /* portRELEASE_ISR_LOCK */ +#endif /* portRELEASE_ISR_LOCK */ #ifndef portGET_ISR_LOCK @@ -436,7 +436,7 @@ #error portGET_ISR_LOCK is required in SMP #endif -#endif /* portGET_ISR_LOCK */ +#endif /* portGET_ISR_LOCK */ /* The timers module relies on xTaskGetSchedulerState(). */ #if configUSE_TIMERS == 1 @@ -453,10 +453,10 @@ #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_STACK_DEPTH must also be defined. #endif /* configTIMER_TASK_STACK_DEPTH */ - #ifndef portTIMER_CALLBACK_ATTRIBUTE - #define portTIMER_CALLBACK_ATTRIBUTE - #endif /* portTIMER_CALLBACK_ATTRIBUTE */ - + #ifndef portTIMER_CALLBACK_ATTRIBUTE + #define portTIMER_CALLBACK_ATTRIBUTE + #endif /* portTIMER_CALLBACK_ATTRIBUTE */ + #endif /* configUSE_TIMERS */ #ifndef portSET_INTERRUPT_MASK_FROM_ISR @@ -1107,15 +1107,15 @@ #error configUSE_MUTEXES must be set to 1 to use recursive mutexes #endif -#if( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) +#if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) #error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use task preemption disable #endif -#if( ( configUSE_PREEMPTION == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) +#if ( ( configUSE_PREEMPTION == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) #error configUSE_PREEMPTION must be set to 1 to use task preemption disable #endif -#if( ( configNUM_CORES == 1 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) +#if ( ( configNUM_CORES == 1 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) #error configUSE_TASK_PREEMPTION_DISABLE is not supported in single core FreeRTOS #endif diff --git a/include/task.h b/include/task.h index 18b7bce2c9d..71acb35847d 100644 --- a/include/task.h +++ b/include/task.h @@ -190,7 +190,7 @@ typedef enum * * \ingroup TaskUtils */ -#define tskNO_AFFINITY ( ( UBaseType_t ) -1U ) +#define tskNO_AFFINITY ( ( UBaseType_t ) -1U ) /** * task. h @@ -260,7 +260,7 @@ typedef enum #define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) /* Check if core value is valid */ -#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ) ) +#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ) ) /*----------------------------------------------------------- * TASK CREATION API @@ -371,7 +371,7 @@ typedef enum #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, - const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ const configSTACK_DEPTH_TYPE usStackDepth, void * const pvParameters, UBaseType_t uxPriority, @@ -499,7 +499,7 @@ typedef enum #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, - const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ const uint32_t ulStackDepth, void * const pvParameters, UBaseType_t uxPriority, @@ -1266,155 +1266,160 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; #if ( configUSE_CORE_AFFINITY == 1 ) - /** - * @brief Sets the core affinity mask for a task. - * - * It sets the cores on which a task can run. configUSE_CORE_AFFINITY must - * be defined as 1 for this function to be available. - * - * @param xTask The handle of the task to set the core affinity mask for. - * Passing NULL will set the core affinity mask for the calling task. - * - * @param uxCoreAffinityMask A bitwise value that indicates the cores on - * which the task can run. Cores are numbered from 0 to configNUM_CORES - 1. - * For example, to ensure that a task can run on core 0 and core 1, set - * uxCoreAffinityMask to 0x03. - * - * Example usage: - * - * // The function that creates task. - * void vAFunction( void ) - * { - * TaskHandle_t xHandle; - * UBaseType_t uxCoreAffinityMask; - * - * // Create a task, storing the handle. - * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); - * - * // Define the core affinity mask such that this task can only run - * // on core 0 and core 2. - * uxCoreAffinityMask = ( ( 1 << 0 ) | ( 1 << 2 ) ); - * - * //Set the core affinity mask for the task. - * vTaskCoreAffinitySet( xHandle, uxCoreAffinityMask ); - * } - */ - void vTaskCoreAffinitySet( const TaskHandle_t xTask, UBaseType_t uxCoreAffinityMask ); + +/** + * @brief Sets the core affinity mask for a task. + * + * It sets the cores on which a task can run. configUSE_CORE_AFFINITY must + * be defined as 1 for this function to be available. + * + * @param xTask The handle of the task to set the core affinity mask for. + * Passing NULL will set the core affinity mask for the calling task. + * + * @param uxCoreAffinityMask A bitwise value that indicates the cores on + * which the task can run. Cores are numbered from 0 to configNUM_CORES - 1. + * For example, to ensure that a task can run on core 0 and core 1, set + * uxCoreAffinityMask to 0x03. + * + * Example usage: + * + * // The function that creates task. + * void vAFunction( void ) + * { + * TaskHandle_t xHandle; + * UBaseType_t uxCoreAffinityMask; + * + * // Create a task, storing the handle. + * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); + * + * // Define the core affinity mask such that this task can only run + * // on core 0 and core 2. + * uxCoreAffinityMask = ( ( 1 << 0 ) | ( 1 << 2 ) ); + * + * //Set the core affinity mask for the task. 
+ * vTaskCoreAffinitySet( xHandle, uxCoreAffinityMask ); + * } + */ + void vTaskCoreAffinitySet( const TaskHandle_t xTask, + UBaseType_t uxCoreAffinityMask ); #endif #if ( configUSE_CORE_AFFINITY == 1 ) - /** - * @brief Gets the core affinity mask for a task. - * - * configUSE_CORE_AFFINITY must be defined as 1 for this function to be - * available. - * - * @param xTask The handle of the task to get the core affinity mask for. - * Passing NULL will get the core affinity mask for the calling task. - * - * @return The core affinity mask which is a bitwise value that indicates - * the cores on which a task can run. Cores are numbered from 0 to - * configNUM_CORES - 1. For example, if a task can run on core 0 and core 1, - * the core affinity mask is 0x03. - * - * Example usage: - * - * // Task handle of the networking task - it is populated elsewhere. - * TaskHandle_t xNetworkingTaskHandle; - * - * void vAFunction( void ) - * { - * TaskHandle_t xHandle; - * UBaseType_t uxNetworkingCoreAffinityMask; - * - * // Create a task, storing the handle. - * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); - * - * //Get the core affinity mask for the networking task. - * uxNetworkingCoreAffinityMask = vTaskCoreAffinityGet( xNetworkingTaskHandle ); - * - * // Here is a hypothetical scenario, just for the example. Assume that we - * // have 2 cores - Core 0 and core 1. We want to pin the application task to - * // the core different than the networking task to ensure that the - * // application task does not interfere with networking. - * if( ( uxNetworkingCoreAffinityMask & ( 1 << 0 ) ) != 0 ) - * { - * // The networking task can run on core 0, pin our task to core 1. - * vTaskCoreAffinitySet( xHandle, ( 1 << 1 ) ); - * } - * else - * { - * // Otherwise, pin our task to core 0. - * vTaskCoreAffinitySet( xHandle, ( 1 << 0 ) ); - * } - * } - */ + +/** + * @brief Gets the core affinity mask for a task. + * + * configUSE_CORE_AFFINITY must be defined as 1 for this function to be + * available. + * + * @param xTask The handle of the task to get the core affinity mask for. + * Passing NULL will get the core affinity mask for the calling task. + * + * @return The core affinity mask which is a bitwise value that indicates + * the cores on which a task can run. Cores are numbered from 0 to + * configNUM_CORES - 1. For example, if a task can run on core 0 and core 1, + * the core affinity mask is 0x03. + * + * Example usage: + * + * // Task handle of the networking task - it is populated elsewhere. + * TaskHandle_t xNetworkingTaskHandle; + * + * void vAFunction( void ) + * { + * TaskHandle_t xHandle; + * UBaseType_t uxNetworkingCoreAffinityMask; + * + * // Create a task, storing the handle. + * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); + * + * //Get the core affinity mask for the networking task. + * uxNetworkingCoreAffinityMask = vTaskCoreAffinityGet( xNetworkingTaskHandle ); + * + * // Here is a hypothetical scenario, just for the example. Assume that we + * // have 2 cores - Core 0 and core 1. We want to pin the application task to + * // the core different than the networking task to ensure that the + * // application task does not interfere with networking. + * if( ( uxNetworkingCoreAffinityMask & ( 1 << 0 ) ) != 0 ) + * { + * // The networking task can run on core 0, pin our task to core 1. + * vTaskCoreAffinitySet( xHandle, ( 1 << 1 ) ); + * } + * else + * { + * // Otherwise, pin our task to core 0. 
+ * vTaskCoreAffinitySet( xHandle, ( 1 << 0 ) ); + * } + * } + */ UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ); #endif #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - /** - * @brief Disables preemption for a task. - * - * @param xTask The handle of the task to disable preemption. Passing NULL - * disables preemption for the calling task. - * - * Example usage: - * - * void vTaskCode( void *pvParameters ) - * { - * // Silence warnings about unused parameters. - * ( void ) pvParameters; - * - * for( ;; ) - * { - * // ... Perform some function here. - * - * // Disable preemption for this task. - * vTaskPreemptionDisable( NULL ); - * - * // The task will not be preempted when it is executing in this portion ... - * - * // ... until the preemption is enabled again. - * vTaskPreemptionEnable( NULL ); - * - * // The task can be preempted when it is executing in this portion. - * } - * } - */ + +/** + * @brief Disables preemption for a task. + * + * @param xTask The handle of the task to disable preemption. Passing NULL + * disables preemption for the calling task. + * + * Example usage: + * + * void vTaskCode( void *pvParameters ) + * { + * // Silence warnings about unused parameters. + * ( void ) pvParameters; + * + * for( ;; ) + * { + * // ... Perform some function here. + * + * // Disable preemption for this task. + * vTaskPreemptionDisable( NULL ); + * + * // The task will not be preempted when it is executing in this portion ... + * + * // ... until the preemption is enabled again. + * vTaskPreemptionEnable( NULL ); + * + * // The task can be preempted when it is executing in this portion. + * } + * } + */ void vTaskPreemptionDisable( const TaskHandle_t xTask ); #endif #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - /** - * @brief Enables preemption for a task. - * - * @param xTask The handle of the task to enable preemption. Passing NULL - * enables preemption for the calling task. - * - * Example usage: - * - * void vTaskCode( void *pvParameters ) - * { - * // Silence warnings about unused parameters. - * ( void ) pvParameters; - * - * for( ;; ) - * { - * // ... Perform some function here. - * - * // Disable preemption for this task. - * vTaskPreemptionDisable( NULL ); - * - * // The task will not be preempted when it is executing in this portion ... - * - * // ... until the preemption is enabled again. - * vTaskPreemptionEnable( NULL ); - * - * // The task can be preempted when it is executing in this portion. - * } - * } - */ + +/** + * @brief Enables preemption for a task. + * + * @param xTask The handle of the task to enable preemption. Passing NULL + * enables preemption for the calling task. + * + * Example usage: + * + * void vTaskCode( void *pvParameters ) + * { + * // Silence warnings about unused parameters. + * ( void ) pvParameters; + * + * for( ;; ) + * { + * // ... Perform some function here. + * + * // Disable preemption for this task. + * vTaskPreemptionDisable( NULL ); + * + * // The task will not be preempted when it is executing in this portion ... + * + * // ... until the preemption is enabled again. + * vTaskPreemptionEnable( NULL ); + * + * // The task can be preempted when it is executing in this portion. + * } + * } + */ void vTaskPreemptionEnable( const TaskHandle_t xTask ); #endif diff --git a/include/timers.h b/include/timers.h index fba7b76dc4b..6bbb9f687de 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1328,6 +1328,7 @@ TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; * for use by the kernel only. 
*/ BaseType_t xTimerCreateTimerTask( void ) PRIVILEGED_FUNCTION; + /* * Splitting the xTimerGenericCommand into two sub functions and making it a macro * removes a recursion path when called from ISRs. This is primarily for the XCore diff --git a/tasks.c b/tasks.c index b0d57265429..c923cb45abb 100644 --- a/tasks.c +++ b/tasks.c @@ -138,23 +138,23 @@ /*-----------------------------------------------------------*/ #if ( configNUM_CORES == 1 ) - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ - UBaseType_t uxTopPriority = uxTopReadyPriority; \ - \ - /* Find the highest priority queue that contains ready tasks. */ \ - while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ - { \ - configASSERT( uxTopPriority ); \ - --uxTopPriority; \ - } \ - \ - /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ - * the same priority get an equal share of the processor time. */ \ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - uxTopReadyPriority = uxTopPriority; \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK */ - #endif + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority = uxTopReadyPriority; \ + \ + /* Find the highest priority queue that contains ready tasks. */ \ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ + { \ + configASSERT( uxTopPriority ); \ + --uxTopPriority; \ + } \ + \ + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ + * the same priority get an equal share of the processor time. */ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + #endif /* if ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -265,15 +265,15 @@ /* Returns pdTRUE if the task is actively running and not scheduled to yield. */ #if ( configNUM_CORES == 1 ) - #define taskTASK_IS_RUNNING( pxTCB ) ( pxTCB == pxCurrentTCB ) - #define taskTASK_IS_YIELDING( pxTCB ) ( pdFALSE ) + #define taskTASK_IS_RUNNING( pxTCB ) ( pxTCB == pxCurrentTCB ) + #define taskTASK_IS_YIELDING( pxTCB ) ( pdFALSE ) #else - #define taskTASK_IS_RUNNING( pxTCB ) ( ( 0 <= pxTCB->xTaskRunState ) && ( pxTCB->xTaskRunState < configNUM_CORES ) ) - #define taskTASK_IS_YIELDING( pxTCB ) ( pxTCB->xTaskRunState == taskTASK_YIELDING ) + #define taskTASK_IS_RUNNING( pxTCB ) ( ( 0 <= pxTCB->xTaskRunState ) && ( pxTCB->xTaskRunState < configNUM_CORES ) ) + #define taskTASK_IS_YIELDING( pxTCB ) ( pxTCB->xTaskRunState == taskTASK_YIELDING ) #endif /* Indicates that the task is an Idle task. */ -#define taskATTRIBUTE_IS_IDLE ( BaseType_t ) ( 1UL << 0 ) +#define taskATTRIBUTE_IS_IDLE ( BaseType_t ) ( 1UL << 0 ) typedef BaseType_t TaskRunning_t; @@ -299,8 +299,8 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ StackType_t * pxStack; /*< Points to the start of the stack. */ #if ( configNUM_CORES > 1 ) - volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if any. */ - BaseType_t xTaskAttribute; /*< Used to identify the idle tasks. */ + volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if any. */ + BaseType_t xTaskAttribute; /*< Used to identify the idle tasks. 
*/ #endif char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ @@ -371,7 +371,7 @@ typedef tskTCB TCB_t; #if ( configNUM_CORES == 1 ) portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; #else - portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; +portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; #define pxCurrentTCB xTaskGetCurrentTaskHandle() #endif @@ -415,7 +415,7 @@ PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUM_CORES ] = { PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUM_CORES ] = { NULL }; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUM_CORES ] = { NULL }; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -450,18 +450,20 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /*-----------------------------------------------------------*/ /* File private functions. --------------------------------*/ + /* * Creates the idle tasks during scheduler start */ static BaseType_t prvCreateIdleTasks( void ); #if ( configNUM_CORES > 1 ) - /* - * Checks to see if another task moved the current task out of the ready - * list while it was waiting to enter a critical section and yields if so. - */ + +/* + * Checks to see if another task moved the current task out of the ready + * list while it was waiting to enter a critical section and yields if so. + */ static void prvCheckForRunStateChange( void ); -#endif /* ( configNUM_CORES > 1 ) */ +#endif /* ( configNUM_CORES > 1 ) */ /* * Yields the given core. @@ -719,7 +721,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } } -#endif +#endif /* if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ static void prvYieldCore( BaseType_t xCoreID ) @@ -752,7 +754,7 @@ static void prvYieldCore( BaseType_t xCoreID ) } } } - #endif + #endif /* if ( configNUM_CORES == 1 ) */ } /*-----------------------------------------------------------*/ @@ -783,7 +785,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, return xLowestPriorityCore; } - #else + #else /* if ( configNUM_CORES == 1 ) */ { BaseType_t xLowestPriority; BaseType_t xTaskPriority; @@ -795,6 +797,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + /* No task should yield for this one if it is a lower priority * than priority level of currently ready tasks. 
*/ if( pxTCB->uxPriority >= uxTopReadyPriority ) @@ -842,19 +845,19 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, } #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + /* Yield all currently running non-idle tasks with a priority lower than + * the task that needs to run. */ + if( ( ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) < xTaskPriority ) && ( xTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) ) { - /* Yield all currently running non-idle tasks with a priority lower than - * the task that needs to run. */ - if( ( ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) < xTaskPriority ) && ( xTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) ) - { - prvYieldCore( xCoreID ); - xYieldCount++; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + prvYieldCore( xCoreID ); + xYieldCount++; + } + else + { + mtCOVERAGE_TEST_MARKER(); } + } #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) && 1 */ } else @@ -873,7 +876,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) /* Verify that the calling core always yields to higher priority tasks. */ - if( ( pxCurrentTCBs[ portGET_CORE_ID() ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) == pdFALSE && + if( ( ( pxCurrentTCBs[ portGET_CORE_ID() ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) == pdFALSE ) && ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) ) { configASSERT( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE || taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ); @@ -883,7 +886,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, return xLowestPriorityCore; } - #endif /* ( configNUM_CORES == 1 ) */ + #endif /* ( configNUM_CORES == 1 ) */ } /*-----------------------------------------------------------*/ @@ -895,6 +898,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, UBaseType_t uxCurrentPriority = uxTopReadyPriority; BaseType_t xTaskScheduled = pdFALSE; BaseType_t xDecrementTopPriority = pdTRUE; + #if ( configUSE_CORE_AFFINITY == 1 ) TCB_t * pxPreviousTCB = NULL; #endif @@ -905,15 +909,15 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, while( xTaskScheduled == pdFALSE ) { #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + if( uxCurrentPriority < uxTopReadyPriority ) { - if( uxCurrentPriority < uxTopReadyPriority ) - { - /* We can't schedule any tasks, other than idle, that have a - * priority lower than the priority of a task currently running - * on another core. */ - uxCurrentPriority = tskIDLE_PRIORITY; - } + /* We can't schedule any tasks, other than idle, that have a + * priority lower than the priority of a task currently running + * on another core. */ + uxCurrentPriority = tskIDLE_PRIORITY; } + } #endif if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) @@ -945,18 +949,18 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, pxTCB = pxTaskItem->pvOwner; #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + /* When falling back to the idle priority because only one priority + * level is allowed to run at a time, we should ONLY schedule the true + * idle tasks, not user tasks at the idle priority. */ + if( uxCurrentPriority < uxTopReadyPriority ) { - /* When falling back to the idle priority because only one priority - * level is allowed to run at a time, we should ONLY schedule the true - * idle tasks, not user tasks at the idle priority. 
*/ - if( uxCurrentPriority < uxTopReadyPriority ) + if( ( pxTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) == pdFALSE ) { - if( ( pxTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) == pdFALSE ) - { - continue; - } + continue; } } + } #endif /* if( configRUN_MULTIPLE_PRIORITIES == 0 ) */ if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) @@ -1010,9 +1014,9 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, { uxTopReadyPriority--; #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) - { - xPriorityDropped = pdTRUE; - } + { + xPriorityDropped = pdTRUE; + } #endif } } @@ -1091,6 +1095,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, configASSERT( taskVALID_CORE_ID( uxCore ) ); xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; + if( pxCurrentTCBs[ uxCore ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) { xTaskPriority = xTaskPriority - ( BaseType_t ) 1; @@ -1120,7 +1125,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, return xTaskScheduled; } -#endif /* ( configNUM_CORES > 1 ) */ +#endif /* ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ @@ -1134,18 +1139,18 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer ) #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - return xTaskCreateStaticAffinitySet(pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY); - } + { + return xTaskCreateStaticAffinitySet( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY ); + } - TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, - const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - const uint32_t ulStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - StackType_t * const puxStackBuffer, - StaticTask_t * const pxTaskBuffer, - UBaseType_t uxCoreAffinityMask ) + TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer, + UBaseType_t uxCoreAffinityMask ) #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ { TCB_t * pxNewTCB; @@ -1184,10 +1189,10 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - /* Set the task's affinity before scheduling it. */ - pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; - } + { + /* Set the task's affinity before scheduling it. 
*/ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } #endif prvAddNewTaskToReadyList( pxNewTCB ); @@ -1208,13 +1213,13 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t * pxCreatedTask ) #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - return xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); - } + { + return xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); + } - BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, - UBaseType_t uxCoreAffinityMask, - TaskHandle_t * pxCreatedTask ) + BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ { TCB_t * pxNewTCB; @@ -1251,10 +1256,10 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, pxTaskDefinition->xRegions ); #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - /* Set the task's affinity before scheduling it */ - pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; - } + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } #endif prvAddNewTaskToReadyList( pxNewTCB ); @@ -1272,13 +1277,13 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t * pxCreatedTask ) #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - return xTaskCreateRestrictedAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); - } + { + return xTaskCreateRestrictedAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); + } - BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, - UBaseType_t uxCoreAffinityMask, - TaskHandle_t * pxCreatedTask ) + BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ { TCB_t * pxNewTCB; @@ -1318,10 +1323,10 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, pxTaskDefinition->xRegions ); #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - /* Set the task's affinity before scheduling it */ - pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; - } + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } #endif prvAddNewTaskToReadyList( pxNewTCB ); @@ -1344,17 +1349,17 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask ) #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - return xTaskCreateAffinitySet(pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, tskNO_AFFINITY, pxCreatedTask); - } + { + return xTaskCreateAffinitySet( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, tskNO_AFFINITY, pxCreatedTask ); + } - BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, - const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ - const configSTACK_DEPTH_TYPE usStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - UBaseType_t uxCoreAffinityMask, - TaskHandle_t * const pxCreatedTask ) + BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * const pxCreatedTask ) #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ { TCB_t * pxNewTCB; @@ -1433,10 +1438,10 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - /* Set the task's affinity before scheduling it */ - pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; - } + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } #endif prvAddNewTaskToReadyList( pxNewTCB ); @@ -1599,15 +1604,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #endif #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - { - pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; - } + { + pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; + } #endif #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - { - pxNewTCB->xPreemptionDisable = 0; - } + { + pxNewTCB->xPreemptionDisable = 0; + } #endif /* Initialize the TCB stack to look as if the task was already running, @@ -1679,7 +1684,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { pxNewTCB->xTaskAttribute = 0; } - #endif + #endif /* if ( configNUM_CORES > 1 ) */ if( pxCreatedTask != NULL ) { @@ -1734,7 +1739,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - #else + #else /* if ( configNUM_CORES == 1 ) */ if( pxNewTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) { BaseType_t xCoreID; @@ -1750,7 +1755,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } } } - #endif + #endif /* if ( configNUM_CORES == 1 ) */ } uxTaskNumber++; @@ -1904,8 +1909,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvYieldCore( xTaskRunningOnCore ); } } - #endif + #endif /* if ( configNUM_CORES == 1 ) */ } + #if ( configNUM_CORES > 1 ) taskEXIT_CRITICAL(); #endif @@ -2399,7 +2405,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - #endif + #endif /* if ( configUSE_PREEMPTION == 1 ) */ /* Remove compiler warning about unused variables when the port * optimised task selection is not being used. */ @@ -2605,6 +2611,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { taskEXIT_CRITICAL(); #if ( configNUM_CORES == 1 ) + /* The scheduler is not running, but the task that was pointed * to by pxCurrentTCB has just been suspended and pxCurrentTCB * must be adjusted to point to a different task. */ @@ -2620,7 +2627,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { vTaskSwitchContext(); } - #else + #else /* if ( configNUM_CORES == 1 ) */ + /* The scheduler is not running, but the task that was pointed * to by pxCurrentTCB has just been suspended and pxCurrentTCB * must be adjusted to point to a different task. 
*/ @@ -2644,7 +2652,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; } } - #endif + #endif /* if ( configNUM_CORES == 1 ) */ } } else @@ -2796,6 +2804,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* Check if yield is required for this task in prvYieldForTask. */ xYieldCoreID = prvYieldForTask( pxTCB, pdTRUE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreID ) ) { /* Mark that a yield is pending in case the user is not @@ -2893,75 +2902,75 @@ static BaseType_t prvCreateIdleTasks( void ) #endif /* ( configNUM_CORES > 1 ) */ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + if( xCoreID == 0 ) { - if( xCoreID == 0 ) - { - StaticTask_t * pxIdleTaskTCBBuffer = NULL; - StackType_t * pxIdleTaskStackBuffer = NULL; - uint32_t ulIdleTaskStackSize; + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; - /* The Idle task is created using user provided RAM - obtain the - * address of the RAM then create the idle task. */ - vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); - xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvIdleTask, - cIdleName, - ulIdleTaskStackSize, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - pxIdleTaskStackBuffer, - pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - - #if ( configNUM_CORES > 1 ) - else - { - static StaticTask_t xIdleTCBBuffers[ configNUM_CORES - 1 ]; - static StackType_t xIdleTaskStackBuffers[ configNUM_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; - - xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - xIdleTaskStackBuffers[ xCoreID - 1 ], - &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - #endif /* if ( configNUM_CORES > 1 ) */ + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvIdleTask, + cIdleName, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } - if( xIdleTaskHandles[ xCoreID ] != NULL ) - { - xReturn = pdPASS; - } + #if ( configNUM_CORES > 1 ) else { - xReturn = pdFAIL; + static StaticTask_t xIdleTCBBuffers[ configNUM_CORES - 1 ]; + static StackType_t xIdleTaskStackBuffers[ configNUM_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; + + xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. 
*/ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + xIdleTaskStackBuffers[ xCoreID - 1 ], + &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } + #endif /* if ( configNUM_CORES > 1 ) */ + + if( xIdleTaskHandles[ xCoreID ] != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; } + } #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ + { + if( xCoreID == 0 ) { - if( xCoreID == 0 ) + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + + #if ( configNUM_CORES > 1 ) + else { - /* The Idle task is being created using dynamically allocated RAM. */ - xReturn = xTaskCreate( prvIdleTask, + xReturn = xTaskCreate( prvMinimalIdleTask, cIdleName, configMINIMAL_STACK_SIZE, ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } - - #if ( configNUM_CORES > 1 ) - else - { - xReturn = xTaskCreate( prvMinimalIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - #endif - } + #endif + } #endif /* configSUPPORT_STATIC_ALLOCATION */ } @@ -2977,16 +2986,16 @@ void vTaskStartScheduler( void ) xReturn = prvCreateIdleTasks(); #if ( configUSE_TIMERS == 1 ) + { + if( xReturn == pdPASS ) { - if( xReturn == pdPASS ) - { - xReturn = xTimerCreateTimerTask(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + xReturn = xTimerCreateTimerTask(); + } + else + { + mtCOVERAGE_TEST_MARKER(); } + } #endif /* configUSE_TIMERS */ if( xReturn == pdPASS ) @@ -3141,7 +3150,7 @@ void vTaskSuspendAll( void ) mtCOVERAGE_TEST_MARKER(); } } - #endif /* ( configNUM_CORES == 1 ) */ + #endif /* ( configNUM_CORES == 1 ) */ } /*----------------------------------------------------------*/ @@ -3215,6 +3224,7 @@ BaseType_t xTaskResumeAll( void ) BaseType_t xAlreadyYielded = pdFALSE; #if ( configNUM_CORES > 1 ) + /* Scheduler running status is not checked in vTaskSuspendAll in single * core implementation. This condition is only required for multiple cores. */ if( xSchedulerRunning != pdFALSE ) @@ -3265,11 +3275,12 @@ BaseType_t xTaskResumeAll( void ) mtCOVERAGE_TEST_MARKER(); } } - #else + #else /* if ( configNUM_CORES == 1 ) */ + /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. * If the current core yielded then vTaskSwitchContext() has already been called * which sets xYieldPendings for the current core to pdTRUE. 
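 * As seen from application code the suspend/resume pattern itself is
 * unchanged.  A rough sketch (the worker function name is an assumption used
 * only for illustration):
 *
 *     vTaskSuspendAll();
 *     {
 *         prvDoWorkThatMustNotBeSwitchedOut();
 *     }
 *     if( xTaskResumeAll() == pdFALSE )
 *     {
 *         taskYIELD();
 *     }
 *
 * xTaskResumeAll() returns pdTRUE when it performed a context switch itself,
 * so the explicit taskYIELD() is only reached when no switch occurred.  Note
 * that suspending the scheduler only prevents context switches; it is not a
 * substitute for mutual exclusion because tasks already running on other
 * cores continue to run.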
*/ - #endif + #endif /* if ( configNUM_CORES == 1 ) */ } if( pxTCB != NULL ) @@ -3630,8 +3641,9 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char /*----------------------------------------------------------*/ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) - /* SMP_TODO : This function returns only idle task handle for core 0. - * Consider to add another function to return the idle task handles. */ + +/* SMP_TODO : This function returns only idle task handle for core 0. + * Consider to add another function to return the idle task handles. */ TaskHandle_t xTaskGetIdleTaskHandle( void ) { /* If xTaskGetIdleTaskHandle() is called before the scheduler has been @@ -3902,6 +3914,7 @@ BaseType_t xTaskIncrementTick( void ) BaseType_t xYieldCoreID; xYieldCoreID = prvYieldForTask( pxTCB, pdTRUE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreID ) ) { xCoreYieldList[ xYieldCoreID ] = pdTRUE; @@ -3928,10 +3941,11 @@ BaseType_t xTaskIncrementTick( void ) mtCOVERAGE_TEST_MARKER(); } } - #else + #else /* if ( configNUM_CORES == 1 ) */ { /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not * force a context switch that would just shuffle tasks around cores */ + /* TODO: There are certainly better ways of doing this that would reduce * the number of interrupts and also potentially help prevent tasks from * moving between cores as often. This, however, works for now. */ @@ -3947,7 +3961,7 @@ BaseType_t xTaskIncrementTick( void ) } } } - #endif /* ( configNUM_CORES == 1 ) */ + #endif /* ( configNUM_CORES == 1 ) */ } #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ @@ -3996,7 +4010,7 @@ BaseType_t xTaskIncrementTick( void ) mtCOVERAGE_TEST_MARKER(); } } - #else + #else /* if ( configNUM_CORES == 1 ) */ { BaseType_t xCoreID; @@ -4165,112 +4179,191 @@ BaseType_t xTaskIncrementTick( void ) #if ( configNUM_CORES == 1 ) void vTaskSwitchContext( void ) -#else - void vTaskSwitchContextForCore( BaseType_t xCoreID ) -#endif -{ - /* Acquire both locks: - * - The ISR lock protects the ready list from simultaneous access by - * both other ISRs and tasks. - * - We also take the task lock to pause here in case another core has - * suspended the scheduler. We don't want to simply set xYieldPending - * and move on if another core suspended the scheduler. We should only - * do that if the current core has suspended the scheduler. */ - - portGET_TASK_LOCK(); /* Must always acquire the task lock first */ - portGET_ISR_LOCK(); - { - /* vTaskSwitchContextForCore() must never be called from within a critical section. - * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. */ - #if ( portCRITICAL_NESTING_IN_TCB == 1 ) - configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); - #endif + { + /* Acquire both locks: + * - The ISR lock protects the ready list from simultaneous access by + * both other ISRs and tasks. + * - We also take the task lock to pause here in case another core has + * suspended the scheduler. We don't want to simply set xYieldPending + * and move on if another core suspended the scheduler. We should only + * do that if the current core has suspended the scheduler. */ + + portGET_TASK_LOCK(); /* Must always acquire the task lock first */ + portGET_ISR_LOCK(); + { + /* vTaskSwitchContextForCore() must never be called from within a critical section. + * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. 
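 * The configASSERT() that follows this comment enforces the restriction by
 * checking that the critical nesting count of the running task is zero.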
*/ + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); + #endif - if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) - { - /* The scheduler is currently suspended - do not allow a context - * switch. */ - #if ( configNUM_CORES == 1 ) + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + { + /* The scheduler is currently suspended - do not allow a context + * switch. */ xYieldPendings[ 0 ] = pdTRUE; - #else - xYieldPendings[ xCoreID ] = pdTRUE; - #endif - } - else - { - #if ( configNUM_CORES == 1 ) + } + else + { xYieldPendings[ 0 ] = pdFALSE; - #else - xYieldPendings[ xCoreID ] = pdFALSE; - #endif - traceTASK_SWITCHED_OUT(); + traceTASK_SWITCHED_OUT(); - #if ( configGENERATE_RUN_TIME_STATS == 1 ) - { - #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); - #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); - #endif + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - /* Add the amount of time the task has been running to the - * accumulated time so far. The time the task started running was - * stored in ulTaskSwitchedInTime. Note that there is no overflow - * protection here so count values are only valid until the timer - * overflows. The guard against negative values is to protect - * against suspect run time stat counter implementations - which - * are provided by the application, not the kernel. */ - if( ulTotalRunTime > ulTaskSwitchedInTime ) + ulTaskSwitchedInTime = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); + + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; } - else + #endif + + /* Select a new task to run using either the generic C or port + * optimised asm code. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) { - mtCOVERAGE_TEST_MARKER(); + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; } + #endif - ulTaskSwitchedInTime = ulTotalRunTime; + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + { + /* Switch C-Runtime's TLS Block to point to the TLS + * Block specific to this task. 
*/ + configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + } + #endif } - #endif /* configGENERATE_RUN_TIME_STATS */ - - /* Check for stack overflow, if configured. */ - taskCHECK_FOR_STACK_OVERFLOW(); + } + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + } +#else /* if ( configNUM_CORES == 1 ) */ + void vTaskSwitchContextForCore( BaseType_t xCoreID ) + { + /* Acquire both locks: + * - The ISR lock protects the ready list from simultaneous access by + * both other ISRs and tasks. + * - We also take the task lock to pause here in case another core has + * suspended the scheduler. We don't want to simply set xYieldPending + * and move on if another core suspended the scheduler. We should only + * do that if the current core has suspended the scheduler. */ + + portGET_TASK_LOCK(); /* Must always acquire the task lock first */ + portGET_ISR_LOCK(); + { + /* vTaskSwitchContextForCore() must never be called from within a critical section. + * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. */ + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); + #endif - /* Before the currently running task is switched out, save its errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) { - pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + /* The scheduler is currently suspended - do not allow a context + * switch. */ + xYieldPendings[ 0 ] = pdTRUE; } - #endif + else + { + xYieldPendings[ xCoreID ] = pdFALSE; + traceTASK_SWITCHED_OUT(); - /* Select a new task to run using either the generic C or port - * optimised asm code. */ - #if ( configNUM_CORES == 1 ) - taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - #else + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ulTaskSwitchedInTime = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); + + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + } + #endif + + /* Select a new task to run using either the generic C or port + * optimised asm code. */ ( void ) prvSelectHighestPriorityTask( xCoreID ); - #endif - traceTASK_SWITCHED_IN(); + traceTASK_SWITCHED_IN(); - /* After the new task is switched in, update the global errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - FreeRTOS_errno = pxCurrentTCB->iTaskErrno; - } - #endif + /* After the new task is switched in, update the global errno. 
*/ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) - { - /* Switch C-Runtime's TLS Block to point to the TLS - * Block specific to this task. */ - configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + { + /* Switch C-Runtime's TLS Block to point to the TLS + * Block specific to this task. */ + configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + } + #endif } - #endif } + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); } - portRELEASE_ISR_LOCK(); - portRELEASE_TASK_LOCK(); -} +#endif /* if ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ #if ( configNUM_CORES > 1 ) @@ -4491,6 +4584,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, taskENTER_CRITICAL(); { xYieldCoreID = prvYieldForTask( pxUnblockedTCB, pdFALSE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreID ) ) { xYieldPendings[ xYieldCoreID ] = pdTRUE; @@ -4498,7 +4592,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, } taskEXIT_CRITICAL(); } - #endif /* ( configUSE_PREEMPTION == 1 ) */ + #endif /* ( configUSE_PREEMPTION == 1 ) */ } /*-----------------------------------------------------------*/ @@ -4652,52 +4746,52 @@ void vTaskMissedYield( void ) for( ; ; ) { #if ( configUSE_PREEMPTION == 0 ) - { - /* If we are not using preemption we keep forcing a task switch to - * see if any other task has become available. If we are using - * preemption we don't need to do this as any task becoming available - * will automatically get the processor anyway. */ - taskYIELD(); - } + { + /* If we are not using preemption we keep forcing a task switch to + * see if any other task has become available. If we are using + * preemption we don't need to do this as any task becoming available + * will automatically get the processor anyway. */ + taskYIELD(); + } #endif /* configUSE_PREEMPTION */ #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + * timesliced. If a task that is sharing the idle priority is ready + * to run then the idle task should yield before the end of the + * timeslice. + * + * A critical region is not required here as we are just reading from + * the list, and an occasional incorrect value will not matter. If + * the ready list at the idle priority contains one more task than the + * number of idle tasks, which is equal to the configured numbers of cores + * then a task other than the idle task is ready to execute. */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) { - /* When using preemption tasks of equal priority will be - * timesliced. If a task that is sharing the idle priority is ready - * to run then the idle task should yield before the end of the - * timeslice. - * - * A critical region is not required here as we are just reading from - * the list, and an occasional incorrect value will not matter. If - * the ready list at the idle priority contains one more task than the - * number of idle tasks, which is equal to the configured numbers of cores - * then a task other than the idle task is ready to execute. 
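 * For example, with configNUM_CORES set to 2 there are two idle tasks, so a
 * ready list length of three at the idle priority means at least one
 * application task is also ready to run and the idle task yields.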
*/ - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) - { - taskYIELD(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); } + } #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) - { - extern void vApplicationMinimalIdleHook( void ); + { + extern void vApplicationMinimalIdleHook( void ); - /* Call the user defined function from within the idle task. This - * allows the application designer to add background functionality - * without the overhead of a separate task. - * - * This hook is intended to manage core activity such as disabling cores that go idle. - * - * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, - * CALL A FUNCTION THAT MIGHT BLOCK. */ - vApplicationMinimalIdleHook(); - } + /* Call the user defined function from within the idle task. This + * allows the application designer to add background functionality + * without the overhead of a separate task. + * + * This hook is intended to manage core activity such as disabling cores that go idle. + * + * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + * CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationMinimalIdleHook(); + } #endif /* configUSE_MINIMAL_IDLE_HOOK */ } } @@ -4834,19 +4928,19 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) #endif /* configUSE_TICKLESS_IDLE */ #if ( configNUM_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) - { - extern void vApplicationMinimalIdleHook( void ); + { + extern void vApplicationMinimalIdleHook( void ); - /* Call the user defined function from within the idle task. This - * allows the application designer to add background functionality - * without the overhead of a separate task. - * - * This hook is intended to manage core activity such as disabling cores that go idle. - * - * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, - * CALL A FUNCTION THAT MIGHT BLOCK. */ - vApplicationMinimalIdleHook(); - } + /* Call the user defined function from within the idle task. This + * allows the application designer to add background functionality + * without the overhead of a separate task. + * + * This hook is intended to manage core activity such as disabling cores that go idle. + * + * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + * CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationMinimalIdleHook(); + } #endif /* configUSE_MINIMAL_IDLE_HOOK */ } } @@ -5390,7 +5484,7 @@ static void prvResetNextTaskUnblockTime( void ) return xReturn; } - #endif + #endif /* if ( configNUM_CORES == 1 ) */ #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ /*-----------------------------------------------------------*/ @@ -5709,10 +5803,11 @@ static void prvResetNextTaskUnblockTime( void ) portYIELD_WITHIN_API(); } #else - /*If not in a critical section then yield immediately. - * Otherwise set xYieldPendings to true to wait to - * yield until exiting the critical section. - */ + +/*If not in a critical section then yield immediately. + * Otherwise set xYieldPendings to true to wait to + * yield until exiting the critical section. 
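 * The pended yield is then performed by vTaskExitCritical() once the
 * critical nesting count of the task returns to zero.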
+ */ void vTaskYieldWithinAPI( void ) { if( pxCurrentTCB->uxCriticalNesting == 0U ) @@ -5724,7 +5819,7 @@ static void prvResetNextTaskUnblockTime( void ) xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } } -#endif +#endif /* if ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -5798,6 +5893,7 @@ static void prvResetNextTaskUnblockTime( void ) { mtCOVERAGE_TEST_MARKER(); } + return uxSavedInterruptStatus; } @@ -5808,8 +5904,6 @@ static void prvResetNextTaskUnblockTime( void ) void vTaskExitCritical( void ) { - BaseType_t xYieldCurrentTask; - if( xSchedulerRunning != pdFALSE ) { /* If pxCurrentTCB->uxCriticalNesting is zero then this function @@ -5827,6 +5921,9 @@ static void prvResetNextTaskUnblockTime( void ) if( pxCurrentTCB->uxCriticalNesting == 0U ) { #if ( configNUM_CORES > 1 ) + { + BaseType_t xYieldCurrentTask; + /* Get the xYieldPending stats inside the critical section. */ xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; @@ -5842,8 +5939,11 @@ static void prvResetNextTaskUnblockTime( void ) { portYIELD(); } - #else + } + #else /* if ( configNUM_CORES > 1 ) */ + { portENABLE_INTERRUPTS(); + } #endif /* ( configNUM_CORES > 1 ) */ } else @@ -6593,6 +6693,7 @@ TickType_t uxTaskResetEventItemValue( void ) } xYieldCoreId = prvYieldForTask( pxTCB, pdFALSE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreId ) ) { if( pxHigherPriorityTaskWoken != NULL ) @@ -6688,6 +6789,7 @@ TickType_t uxTaskResetEventItemValue( void ) } xYieldCoreId = prvYieldForTask( pxTCB, pdFALSE, pdFALSE ); + if( taskVALID_CORE_ID( xYieldCoreId ) ) { if( pxHigherPriorityTaskWoken != NULL ) @@ -6792,7 +6894,7 @@ TickType_t uxTaskResetEventItemValue( void ) return ulReturn; } -#endif +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ /*-----------------------------------------------------------*/ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) @@ -6814,6 +6916,7 @@ TickType_t uxTaskResetEventItemValue( void ) { ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter; } + ulReturn = ulRunTimeCounter / ulTotalTime; } else diff --git a/timers.c b/timers.c index e0ac7a98fcd..058631175a1 100644 --- a/timers.c +++ b/timers.c @@ -72,17 +72,17 @@ #define tmrSTATUS_IS_AUTORELOAD ( ( uint8_t ) 0x04 ) /* The definition of the timers themselves. */ - typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ + typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - const char * pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. */ - TickType_t xTimerPeriodInTicks; /*<< How quickly and often the timer expires. */ - void * pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ - portTIMER_CALLBACK_ATTRIBUTE TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + const char * pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. */ + TickType_t xTimerPeriodInTicks; /*<< How quickly and often the timer expires. */ + void * pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ + portTIMER_CALLBACK_ATTRIBUTE TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ + UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ #endif - uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ + uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ } xTIMER; /* The old xTIMER name is maintained above then typedefed to the new Timer_t @@ -103,7 +103,7 @@ typedef struct tmrCallbackParameters { - portTIMER_CALLBACK_ATTRIBUTE + portTIMER_CALLBACK_ATTRIBUTE PendedFunction_t pxCallbackFunction; /* << The callback function to execute. */ void * pvParameter1; /* << The value that will be used as the callback functions first parameter. */ uint32_t ulParameter2; /* << The value that will be used as the callback functions second parameter. */ @@ -383,92 +383,92 @@ traceTIMER_CREATE( pxNewTimer ); } /*-----------------------------------------------------------*/ - - BaseType_t xTimerGenericCommandFromTask( TimerHandle_t xTimer, - const BaseType_t xCommandID, - const TickType_t xOptionalValue, - BaseType_t * const pxHigherPriorityTaskWoken, - const TickType_t xTicksToWait ) - { - BaseType_t xReturn = pdFAIL; + + BaseType_t xTimerGenericCommandFromTask( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) + { + BaseType_t xReturn = pdFAIL; DaemonTaskMessage_t xMessage; - ( void ) pxHigherPriorityTaskWoken; - - configASSERT( xTimer ); - - /* Send a message to the timer service task to perform a particular action - * on a particular timer definition. */ - if( xTimerQueue != NULL ) - { - /* Send a command to the timer service task to start the xTimer timer. */ - xMessage.xMessageID = xCommandID; - xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; - xMessage.u.xTimerParameters.pxTimer = xTimer; - - configASSERT( xCommandID < tmrFIRST_FROM_ISR_COMMAND ); - - if( xCommandID < tmrFIRST_FROM_ISR_COMMAND ) - { - if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING ) - { - xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); - } - else - { - xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY ); - } - } - - traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - return xReturn; - } -/*-----------------------------------------------------------*/ - - BaseType_t xTimerGenericCommandFromISR( TimerHandle_t xTimer, - const BaseType_t xCommandID, - const TickType_t xOptionalValue, - BaseType_t * const pxHigherPriorityTaskWoken, - const TickType_t xTicksToWait ) - { - BaseType_t xReturn = pdFAIL; + ( void ) pxHigherPriorityTaskWoken; + + configASSERT( xTimer ); + + /* Send a message to the timer service task to perform a particular action + * on a particular timer definition. 
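 * The intent is that the task level timer API macros, such as xTimerStart()
 * and xTimerStop(), resolve to this function with the corresponding
 * tmrCOMMAND_ value, while their FromISR counterparts resolve to
 * xTimerGenericCommandFromISR() below; hence the configASSERT() on the
 * command identifier in each of the two functions.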
*/ + if( xTimerQueue != NULL ) + { + /* Send a command to the timer service task to start the xTimer timer. */ + xMessage.xMessageID = xCommandID; + xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; + xMessage.u.xTimerParameters.pxTimer = xTimer; + + configASSERT( xCommandID < tmrFIRST_FROM_ISR_COMMAND ); + + if( xCommandID < tmrFIRST_FROM_ISR_COMMAND ) + { + if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING ) + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + } + else + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY ); + } + } + + traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t xTimerGenericCommandFromISR( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) + { + BaseType_t xReturn = pdFAIL; DaemonTaskMessage_t xMessage; - ( void ) xTicksToWait; - - configASSERT( xTimer ); - - /* Send a message to the timer service task to perform a particular action - * on a particular timer definition. */ - if( xTimerQueue != NULL ) - { - /* Send a command to the timer service task to start the xTimer timer. */ - xMessage.xMessageID = xCommandID; - xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; - xMessage.u.xTimerParameters.pxTimer = xTimer; - - configASSERT( xCommandID >= tmrFIRST_FROM_ISR_COMMAND ); - - if( xCommandID >= tmrFIRST_FROM_ISR_COMMAND ) - { - xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); - } - - traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - return xReturn; - } + ( void ) xTicksToWait; + + configASSERT( xTimer ); + + /* Send a message to the timer service task to perform a particular action + * on a particular timer definition. */ + if( xTimerQueue != NULL ) + { + /* Send a command to the timer service task to start the xTimer timer. 
*/ + xMessage.xMessageID = xCommandID; + xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; + xMessage.u.xTimerParameters.pxTimer = xTimer; + + configASSERT( xCommandID >= tmrFIRST_FROM_ISR_COMMAND ); + + if( xCommandID >= tmrFIRST_FROM_ISR_COMMAND ) + { + xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + } + + traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } /*-----------------------------------------------------------*/ TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) From 04e97b168f0babbde393b9a855442d8e8767b713 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Sun, 16 Oct 2022 21:26:45 +0800 Subject: [PATCH 105/164] Fix Add new task for single core when scheduler not running --- tasks.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/tasks.c b/tasks.c index c923cb45abb..d60dc53c98b 100644 --- a/tasks.c +++ b/tasks.c @@ -1707,7 +1707,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { uxCurrentNumberOfTasks++; - if( xSchedulerRunning == pdFALSE ) + #if ( configNUM_CORES > 1 ) + if( xSchedulerRunning == pdFALSE ) + #endif { if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) { @@ -1730,13 +1732,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } else { - if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + if( xSchedulerRunning == pdFALSE ) { - pxCurrentTCB = pxNewTCB; - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + { + pxCurrentTCB = pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } #else /* if ( configNUM_CORES == 1 ) */ From 1a8af51fbdbb20449a582e77bb8f2c774e519522 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Sun, 16 Oct 2022 21:37:08 +0800 Subject: [PATCH 106/164] Fix priority set when task is not in ready list for single core --- tasks.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tasks.c b/tasks.c index d60dc53c98b..467469f2862 100644 --- a/tasks.c +++ b/tasks.c @@ -2370,10 +2370,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } else { - /* It's possible that xYieldForTask was already set to pdTRUE because - * its priority is being raised. However, since it is not in a ready list - * we don't actually need to yield for it. */ - xYieldForTask = pdFALSE; + #if ( configNUM_CORES > 1 ) + /* It's possible that xYieldForTask was already set to pdTRUE because + * its priority is being raised. However, since it is not in a ready list + * we don't actually need to yield for it. */ + xYieldForTask = pdFALSE; + #endif } #if ( configUSE_PREEMPTION == 1 ) From 1b2b2a8532a69ab39d6196ce83bbfa96006a2419 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Sun, 16 Oct 2022 21:33:14 +0800 Subject: [PATCH 107/164] Fix vTaskResume when task is not running --- tasks.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tasks.c b/tasks.c index 467469f2862..d48ded28cf9 100644 --- a/tasks.c +++ b/tasks.c @@ -2726,12 +2726,18 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* It does not make sense to resume the calling task. */ configASSERT( xTaskToResume ); - /* The parameter cannot be NULL as it is impossible to resume the - * currently executing task. It is also impossible to resume a task - * that is actively running on another core but it is too dangerous - * to check their run state here. 
Safer to get into a critical section - * and check if it is actually suspended or not below. */ - if( pxTCB != NULL ) + #if ( configNUM_CORES == 1 ) + /* The parameter cannot be NULL as it is impossible to resume the + * currently executing task. */ + if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + #else + /* The parameter cannot be NULL as it is impossible to resume the + * currently executing task. It is also impossible to resume a task + * that is actively running on another core but it is too dangerous + * to check their run state here. Safer to get into a critical section + * and check if it is actually suspended or not below. */ + if( pxTCB != NULL ) + #endif { taskENTER_CRITICAL(); { From 56d1d75c6c4667a6d2bbdfeec99614273f8fd756 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Sun, 16 Oct 2022 21:42:43 +0800 Subject: [PATCH 108/164] Fix uncrustify formating warning --- tasks.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index d48ded28cf9..845ac863a78 100644 --- a/tasks.c +++ b/tasks.c @@ -2371,6 +2371,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) else { #if ( configNUM_CORES > 1 ) + /* It's possible that xYieldForTask was already set to pdTRUE because * its priority is being raised. However, since it is not in a ready list * we don't actually need to yield for it. */ @@ -2727,10 +2728,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( xTaskToResume ); #if ( configNUM_CORES == 1 ) + /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. */ - if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + if( ( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) && ( pxTCB != NULL ) ) #else + /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. It is also impossible to resume a task * that is actively running on another core but it is too dangerous From 9763a3548314ed4b90dc6113b8be31a230fe82ff Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 17 Oct 2022 21:22:35 +0800 Subject: [PATCH 109/164] Add portCHECK_IF_IN_ISR for SMP --- include/FreeRTOS.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 698beaaeb44..81d47d7c1fb 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -438,6 +438,14 @@ #endif /* portGET_ISR_LOCK */ +#ifndef portCHECK_IF_IN_ISR + + #if ( configNUM_CORES > 1 ) + #error portCHECK_IF_IN_ISR is required in SMP + #endif + +#endif /* portCHECK_IF_IN_ISR */ + /* The timers module relies on xTaskGetSchedulerState(). */ #if configUSE_TIMERS == 1 From e0c1bf8a10dac6ea3974abb8e62e3e8b46fc5b8a Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Fri, 21 Oct 2022 11:40:30 +0800 Subject: [PATCH 110/164] Format vTaskSwitchContext --- tasks.c | 159 ++++++++++++++++++++++++-------------------------------- 1 file changed, 69 insertions(+), 90 deletions(-) diff --git a/tasks.c b/tasks.c index 845ac863a78..2087d098501 100644 --- a/tasks.c +++ b/tasks.c @@ -4196,97 +4196,89 @@ BaseType_t xTaskIncrementTick( void ) #if ( configNUM_CORES == 1 ) void vTaskSwitchContext( void ) { - /* Acquire both locks: - * - The ISR lock protects the ready list from simultaneous access by - * both other ISRs and tasks. - * - We also take the task lock to pause here in case another core has - * suspended the scheduler. We don't want to simply set xYieldPending - * and move on if another core suspended the scheduler. 
We should only - * do that if the current core has suspended the scheduler. */ - - portGET_TASK_LOCK(); /* Must always acquire the task lock first */ - portGET_ISR_LOCK(); + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) { - /* vTaskSwitchContextForCore() must never be called from within a critical section. - * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. */ - #if ( portCRITICAL_NESTING_IN_TCB == 1 ) - configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); - #endif + /* The scheduler is currently suspended - do not allow a context + * switch. */ + xYieldPendings[ 0 ] = pdTRUE; + } + else + { + xYieldPendings[ 0 ] = pdFALSE; + traceTASK_SWITCHED_OUT(); - if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + #if ( configGENERATE_RUN_TIME_STATS == 1 ) { - /* The scheduler is currently suspended - do not allow a context - * switch. */ - xYieldPendings[ 0 ] = pdTRUE; - } - else - { - xYieldPendings[ 0 ] = pdFALSE; - traceTASK_SWITCHED_OUT(); + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif - #if ( configGENERATE_RUN_TIME_STATS == 1 ) + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) { - #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); - #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); - #endif - - /* Add the amount of time the task has been running to the - * accumulated time so far. The time the task started running was - * stored in ulTaskSwitchedInTime. Note that there is no overflow - * protection here so count values are only valid until the timer - * overflows. The guard against negative values is to protect - * against suspect run time stat counter implementations - which - * are provided by the application, not the kernel. */ - if( ulTotalRunTime > ulTaskSwitchedInTime ) - { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - ulTaskSwitchedInTime = ulTotalRunTime; + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); } - #endif /* configGENERATE_RUN_TIME_STATS */ - - /* Check for stack overflow, if configured. */ - taskCHECK_FOR_STACK_OVERFLOW(); - - /* Before the currently running task is switched out, save its errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) + else { - pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + mtCOVERAGE_TEST_MARKER(); } - #endif - /* Select a new task to run using either the generic C or port - * optimised asm code. */ - taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - traceTASK_SWITCHED_IN(); + ulTaskSwitchedInTime = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ - /* After the new task is switched in, update the global errno. 
*/ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - FreeRTOS_errno = pxCurrentTCB->iTaskErrno; - } - #endif + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) - { - /* Switch C-Runtime's TLS Block to point to the TLS - * Block specific to this task. */ - configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); - } - #endif + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; } + #endif + + /* Select a new task to run using either the generic C or port + * optimised asm code. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif + + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + { + /* Switch C-Runtime's TLS Block to point to the TLS + * Block specific to this task. */ + configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + } + #endif } - portRELEASE_ISR_LOCK(); - portRELEASE_TASK_LOCK(); } #else /* if ( configNUM_CORES == 1 ) */ + void vTaskSwitchContext( void ) + { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + vTaskSwitchContextForCore( xCoreID ); + } +#endif /* if ( configNUM_CORES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_CORES > 1 ) void vTaskSwitchContextForCore( BaseType_t xCoreID ) { /* Acquire both locks: @@ -4379,20 +4371,7 @@ BaseType_t xTaskIncrementTick( void ) portRELEASE_ISR_LOCK(); portRELEASE_TASK_LOCK(); } -#endif /* if ( configNUM_CORES == 1 ) */ - -/*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) - void vTaskSwitchContext( void ) - { - BaseType_t xCoreID; - - xCoreID = portGET_CORE_ID(); - - vTaskSwitchContextForCore( xCoreID ); - } -#endif - +#endif /* if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ void vTaskPlaceOnEventList( List_t * const pxEventList, From 54faeaf39d822dcc8bc1b0e3fcf8754e4409ec2f Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 25 Oct 2022 21:40:25 +0800 Subject: [PATCH 111/164] Fix vTaskSwitchContextForCore bug due to uncrustify --- tasks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 2087d098501..38d93ffa404 100644 --- a/tasks.c +++ b/tasks.c @@ -4302,7 +4302,7 @@ BaseType_t xTaskIncrementTick( void ) { /* The scheduler is currently suspended - do not allow a context * switch. 
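 * The yield is instead latched in xYieldPendings[ xCoreID ] so that it can
 * be acted upon once the scheduler is resumed for this core.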
*/ - xYieldPendings[ 0 ] = pdTRUE; + xYieldPendings[ xCoreID ] = pdTRUE; } else { From 9636cdb57633dc8f3f9357bd919b0639a93d24d1 Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Sun, 6 Nov 2022 20:47:40 +0000 Subject: [PATCH 112/164] First review - did not build yet Signed-off-by: Gaurav Aggarwal --- tasks.c | 1467 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 859 insertions(+), 608 deletions(-) diff --git a/tasks.c b/tasks.c index 38d93ffa404..1e7b2c6d7f6 100644 --- a/tasks.c +++ b/tasks.c @@ -137,24 +137,22 @@ /*-----------------------------------------------------------*/ - #if ( configNUM_CORES == 1 ) - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ - UBaseType_t uxTopPriority = uxTopReadyPriority; \ - \ - /* Find the highest priority queue that contains ready tasks. */ \ - while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ - { \ - configASSERT( uxTopPriority ); \ - --uxTopPriority; \ - } \ - \ - /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ - * the same priority get an equal share of the processor time. */ \ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - uxTopReadyPriority = uxTopPriority; \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK */ - #endif /* if ( configNUM_CORES == 1 ) */ +#define taskSELECT_HIGHEST_PRIORITY_TASK() \ +{ \ + UBaseType_t uxTopPriority = uxTopReadyPriority; \ + \ + /* Find the highest priority queue that contains ready tasks. */ \ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ + { \ + configASSERT( uxTopPriority ); \ + --uxTopPriority; \ + } \ + \ + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ + * the same priority get an equal share of the processor time. */ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ +} /* taskSELECT_HIGHEST_PRIORITY_TASK */ /*-----------------------------------------------------------*/ @@ -167,7 +165,7 @@ #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ #if ( configNUM_CORES > 1 ) - #error configUSE_PORT_OPTIMISED_TASK_SELECTION not yet supported in SMP + #error configUSE_PORT_OPTIMISED_TASK_SELECTION not supported in FreeRTOS SMP. #endif /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is @@ -257,25 +255,30 @@ #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL #endif +/* Task state. */ +typedef BaseType_t TaskRunning_t; + /* Indicates that the task is not actively running on any core. */ #define taskTASK_NOT_RUNNING ( TaskRunning_t ) ( -1 ) /* Indicates that the task is actively running but scheduled to yield. */ #define taskTASK_YIELDING ( TaskRunning_t ) ( -2 ) -/* Returns pdTRUE if the task is actively running and not scheduled to yield. */ +/* taskTASK_IS_RUNNING - Returns pdTRUE if the task is actively running + * and not scheduled to yield. + * taskTASK_IS_YIELDING - Returns pdTRUE if the task is actively running + * but scheduled to yield. 
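 * In the SMP build xTaskRunState holds the number of the core the task is
 * running on ( 0 to configNUM_CORES - 1 ) while it runs normally; otherwise
 * it holds taskTASK_NOT_RUNNING, or taskTASK_YIELDING if the task is still
 * on a core but has been asked to yield.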
+ */ #if ( configNUM_CORES == 1 ) #define taskTASK_IS_RUNNING( pxTCB ) ( pxTCB == pxCurrentTCB ) #define taskTASK_IS_YIELDING( pxTCB ) ( pdFALSE ) #else - #define taskTASK_IS_RUNNING( pxTCB ) ( ( 0 <= pxTCB->xTaskRunState ) && ( pxTCB->xTaskRunState < configNUM_CORES ) ) + #define taskTASK_IS_RUNNING( pxTCB ) ( ( pxTCB->xTaskRunState >= 0 ) && ( pxTCB->xTaskRunState < configNUM_CORES ) ) #define taskTASK_IS_YIELDING( pxTCB ) ( pxTCB->xTaskRunState == taskTASK_YIELDING ) #endif /* Indicates that the task is an Idle task. */ -#define taskATTRIBUTE_IS_IDLE ( BaseType_t ) ( 1UL << 0 ) - -typedef BaseType_t TaskRunning_t; +#define taskATTRIBUTE_IS_IDLE ( UBaseType_t ) ( 1UL << 0UL ) /* * Task control block. A task control block (TCB) is allocated for each task, @@ -291,7 +294,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #endif #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) - UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have >= the same number of bits as SMP confNUM_CORES */ + UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have greater than or equal to the number of bits as confNUM_CORES. */ #endif ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ @@ -299,13 +302,13 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ StackType_t * pxStack; /*< Points to the start of the stack. */ #if ( configNUM_CORES > 1 ) - volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if any. */ - BaseType_t xTaskAttribute; /*< Used to identify the idle tasks. */ + volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if the task is running. Otherwise, identifies the task's state - not running or yielding. */ + UBaseType_t uxTaskAttributes; /*< Task's attributes - currently used to identify the idle tasks. */ #endif char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - BaseType_t xPreemptionDisable; /*< Used to prevent the task from being preempted */ + BaseType_t xPreemptionDisable; /*< Used to prevent the task from being preempted. */ #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) @@ -371,7 +374,7 @@ typedef tskTCB TCB_t; #if ( configNUM_CORES == 1 ) portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; #else -portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; + portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; #define pxCurrentTCB xTaskGetCurrentTaskHandle() #endif @@ -415,7 +418,7 @@ PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUM_CORES ] = { PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. 
*/ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUM_CORES ] = { NULL }; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUM_CORES ] = { NULL }; /*< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -431,9 +434,9 @@ const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U; * when the scheduler is unsuspended. The pending ready list itself can only be * accessed from a critical section. * - * Updates to uxSchedulerSuspended must be protected by both the task and ISR locks and - * must not be done by an ISR. Reads must be protected by either lock and may be done by - * either an ISR or a task. */ + * Updates to uxSchedulerSuspended must be protected by both the task lock and the ISR lock + * and must not be done from an ISR. Reads must be protected by either lock and may be done + * from either an ISR or a task. */ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; #if ( configGENERATE_RUN_TIME_STATS == 1 ) @@ -452,37 +455,40 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* File private functions. --------------------------------*/ /* - * Creates the idle tasks during scheduler start + * Creates the idle tasks during scheduler start. */ static BaseType_t prvCreateIdleTasks( void ); #if ( configNUM_CORES > 1 ) - -/* - * Checks to see if another task moved the current task out of the ready - * list while it was waiting to enter a critical section and yields if so. - */ + /* + * Checks to see if another task moved the current task out of the ready + * list while it was waiting to enter a critical section and yields, if so. + */ static void prvCheckForRunStateChange( void ); -#endif /* ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUM_CORES > 1 ) */ -/* - * Yields the given core. - */ -static void prvYieldCore( BaseType_t xCoreID ); +#if ( configNUM_CORES > 1 ) + /* + * Yields the given core. + */ + static void prvYieldCore( BaseType_t xCoreID ); +#endif /* #if ( configNUM_CORES > 1 ) */ -/* - * Yields a core, or cores if multiple priorities are not allowed to run - * simultaneously, to allow the task pxTCB to run. Negative value is returned if - * yeilding for the task is not required. Otherwise, core ID is returned. - */ -static BaseType_t prvYieldForTask( TCB_t * pxTCB, - const BaseType_t xPreemptEqualPriority, - BaseType_t xYieldForTask ); +#if ( configNUM_CORES > 1 ) + /* + * Yields a core, or cores if multiple priorities are not allowed to run + * simultaneously, to allow the task pxTCB to run. + */ + static void prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority ); +#endif /* #if ( configNUM_CORES > 1 ) */ -/* - * Selects the highest priority available task - */ -static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ); +#if ( configNUM_CORES > 1 ) + /* + * Selects the highest priority available task for the given core. 
+ */ + static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ); +#endif /* #if ( configNUM_CORES > 1 ) */ /** * Utility task that simply returns pdTRUE if the task referenced by xTask is @@ -506,10 +512,15 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * The idle task is automatically created and added to the ready lists upon * creation of the first user task. * + * In the FreeRTOS SMP, configNUM_CORES - 1 minimal idle tasks are also + * created to ensure that each core has an idle task to run when no other + * task is available to run. + * * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific - * language extensions. The equivalent prototype for this function is: + * language extensions. The equivalent prototype for these functions are: * * void prvIdleTask( void *pvParameters ); + * void prvMinimalIdleTask( void *pvParameters ); * */ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; @@ -653,7 +664,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; UBaseType_t uxPrevSchedulerSuspended; TCB_t * pxThisTCB; - /* This function should not be called in ISR. If the task on the current + /* This should be skipped if called from an ISR. If the task on the current * core is no longer running, then vTaskSwitchContext() probably should * be run before returning, but we don't have a way to force that to happen * from here. */ @@ -666,23 +677,20 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) { /* We are only here if we just entered a critical section - * or if we just suspended the scheduler, and another task - * has requested that we yield. - * - * This is slightly complicated since we need to save and restore - * the suspension and critical nesting counts, as well as release - * and reacquire the correct locks. And then do it all over again - * if our state changed again during the reacquisition. */ - + * or if we just suspended the scheduler, and another task + * has requested that we yield. + * + * This is slightly complicated since we need to save and restore + * the suspension and critical nesting counts, as well as release + * and reacquire the correct locks. And then, do it all over again + * if our state changed again during the reacquisition. */ uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; uxPrevSchedulerSuspended = uxSchedulerSuspended; - /* this must only be called the first time we enter into a critical + /* This must only be called the first time we enter into a critical * section, otherwise it could context switch in the middle of a * critical section. */ - configASSERT( uxPrevCriticalNesting + uxPrevSchedulerSuspended == 1U ); - - uxSchedulerSuspended = 0U; + configASSERT( ( uxPrevCriticalNesting + uxPrevSchedulerSuspended ) == 1U ); if( uxPrevCriticalNesting > 0U ) { @@ -692,7 +700,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } else { - /* uxPrevSchedulerSuspended must be 1 */ + uxSchedulerSuspended = 0U; portRELEASE_TASK_LOCK(); } @@ -714,24 +722,19 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( uxPrevCriticalNesting == 0U ) { - /* uxPrevSchedulerSuspended must be 1 */ + /* uxPrevSchedulerSuspended must be 1. 
*/ configASSERT( uxPrevSchedulerSuspended != ( UBaseType_t ) pdFALSE ); portRELEASE_ISR_LOCK(); } } } } -#endif /* if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ -static void prvYieldCore( BaseType_t xCoreID ) -{ - #if ( configNUM_CORES == 1 ) - { - configASSERT( xCoreID == 0 ); - portYIELD_WITHIN_API(); - } - #else + +#if ( configNUM_CORES > 1 ) + static void prvYieldCore( BaseType_t xCoreID ) { /* This must be called from a critical section and xCoreID must be valid. */ if( portCHECK_IF_IN_ISR() && ( xCoreID == portGET_CORE_ID() ) ) @@ -754,77 +757,50 @@ static void prvYieldCore( BaseType_t xCoreID ) } } } - #endif /* if ( configNUM_CORES == 1 ) */ -} - +#endif /* #if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ -static BaseType_t prvYieldForTask( TCB_t * pxTCB, - const BaseType_t xPreemptEqualPriority, - BaseType_t xYieldForTask ) -{ - #if ( configNUM_CORES == 1 ) - { - BaseType_t xLowestPriorityCore = ( ( BaseType_t ) -1 ); /* Negative value to indicate no yielding required. */ - - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - xLowestPriorityCore = ( ( BaseType_t ) 0 ); - } - else - { - if( ( xPreemptEqualPriority == pdTRUE ) && ( pxTCB->uxPriority == pxCurrentTCB->uxPriority ) ) - { - xLowestPriorityCore = ( ( BaseType_t ) 0 ); - } - } - if( taskVALID_CORE_ID( xLowestPriorityCore ) && ( xYieldForTask == pdTRUE ) ) - { - taskYIELD_IF_USING_PREEMPTION(); - } - - return xLowestPriorityCore; - } - #else /* if ( configNUM_CORES == 1 ) */ +#if ( configNUM_CORES > 1 ) + static void prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority ) { - BaseType_t xLowestPriority; - BaseType_t xTaskPriority; - BaseType_t xLowestPriorityCore = ( ( BaseType_t ) -1 ); /* Negative value to indicate no yielding required. */ + BaseType_t xLowestPriorityToPreempt; + BaseType_t xCurrentCoreTaskPriority; + BaseType_t xLowestPriorityCore = ( BaseType_t ) -1; BaseType_t xYieldCount = 0; BaseType_t xCoreID; /* This must be called from a critical section. */ configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); - #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) - - /* No task should yield for this one if it is a lower priority - * than priority level of currently ready tasks. */ - if( pxTCB->uxPriority >= uxTopReadyPriority ) - #endif + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + /* No task should yield for this one if it is a lower priority + * than priority level of currently ready tasks. */ + if( pxTCB->uxPriority >= uxTopReadyPriority ) + #endif { - xLowestPriority = ( BaseType_t ) pxTCB->uxPriority; + xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority; if( xPreemptEqualPriority == pdFALSE ) { - /* xLowestPriority will be decremented to -1 if the priority of pxTCB + /* xLowestPriorityToPreempt will be decremented to -1 if the priority of pxTCB * is 0. This is ok as we will give system idle tasks a priority of -1 below. */ - --xLowestPriority; + --xLowestPriorityToPreempt; } for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) { - xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority; + xCurrentCoreTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority; /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. 
*/ - if( pxCurrentTCBs[ xCoreID ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) { - xTaskPriority = xTaskPriority - 1; + xCurrentCoreTaskPriority = xCurrentCoreTaskPriority - 1; } if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) ) { - if( xTaskPriority <= xLowestPriority ) + if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt ) { #if ( configUSE_CORE_AFFINITY == 1 ) if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) @@ -834,7 +810,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) #endif { - xLowestPriority = xTaskPriority; + xLowestPriorityToPreempt = xCurrentCoreTaskPriority; xLowestPriorityCore = xCoreID; } } @@ -848,7 +824,8 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, { /* Yield all currently running non-idle tasks with a priority lower than * the task that needs to run. */ - if( ( ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) < xTaskPriority ) && ( xTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) ) + if( ( xCurrentCoreTaskPriority > ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) ) && + ( xCurrentCoreTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) ) { prvYieldCore( xCoreID ); xYieldCount++; @@ -858,7 +835,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, mtCOVERAGE_TEST_MARKER(); } } - #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) && 1 */ + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ } else { @@ -868,31 +845,24 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, if( ( xYieldCount == 0 ) && taskVALID_CORE_ID( xLowestPriorityCore ) ) { - if( xYieldForTask == pdTRUE ) - { - prvYieldCore( xLowestPriorityCore ); - } + prvYieldCore( xLowestPriorityCore ); } #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) /* Verify that the calling core always yields to higher priority tasks. */ - if( ( ( pxCurrentTCBs[ portGET_CORE_ID() ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) == pdFALSE ) && + if( ( ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0 ) && ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) ) { - configASSERT( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE || taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ); + configASSERT( ( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) || + ( taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ) ); } #endif } - - return xLowestPriorityCore; } - #endif /* ( configNUM_CORES == 1 ) */ -} - +#endif /* #if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configNUM_CORES > 1 ) - static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) { UBaseType_t uxCurrentPriority = uxTopReadyPriority; @@ -955,13 +925,13 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, * idle tasks, not user tasks at the idle priority. 
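To make the core-selection loop in prvYieldForTask() easier to follow, here is a reduced, self-contained version of the same idea. Plain arrays stand in for pxCurrentTCBs, and the taskTASK_IS_RUNNING(), xYieldPendings, core-affinity and xPreemptionDisable checks are deliberately omitted; this is a sketch of the selection rule only, not the kernel code:

    #include <stdint.h>

    #define exampleNUM_CORES    4

    /* Returns the core whose current task has the lowest effective priority
     * that a task of priority xTaskPriority is still allowed to preempt, or
     * -1 if no core needs to yield. */
    static int32_t lExampleSelectCoreToPreempt( const int32_t xCorePriority[ exampleNUM_CORES ],
                                                const int32_t xCoreIsIdle[ exampleNUM_CORES ],
                                                int32_t xTaskPriority,
                                                int32_t xPreemptEqualPriority )
    {
        int32_t xLowestPriorityToPreempt = xTaskPriority;
        int32_t xLowestPriorityCore = -1;
        int32_t xCoreID;

        if( xPreemptEqualPriority == 0 )
        {
            /* May become -1 when xTaskPriority is 0; idle cores are given an
             * effective priority of -1 below so they remain preemptable. */
            xLowestPriorityToPreempt--;
        }

        for( xCoreID = 0; xCoreID < exampleNUM_CORES; xCoreID++ )
        {
            int32_t xCurrentCoreTaskPriority = xCorePriority[ xCoreID ];

            if( xCoreIsIdle[ xCoreID ] != 0 )
            {
                xCurrentCoreTaskPriority--;    /* idle tasks rank just below priority 0 */
            }

            if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt )
            {
                xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
                xLowestPriorityCore = xCoreID;
            }
        }

        return xLowestPriorityCore;
    }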
*/ if( uxCurrentPriority < uxTopReadyPriority ) { - if( ( pxTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) == pdFALSE ) + if( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0 ) { continue; } } } - #endif /* if( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { @@ -1038,89 +1008,95 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ); #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { if( xPriorityDropped != pdFALSE ) { /* There may be several ready tasks that were being prevented from running because there was * a higher priority task running. Now that the last of the higher priority tasks is no longer * running, make sure all the other idle tasks yield. */ - UBaseType_t x; + BaseType_t x; for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ) { - if( pxCurrentTCBs[ x ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) { prvYieldCore( x ); } } } - #endif /* if( configRUN_MULTIPLE_PRIORITIES == 0 ) */ - } + } + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ - #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) + #if ( configUSE_CORE_AFFINITY == 1 ) { - /* A ready task was just bumped off this core. Look at the cores it can run from - * to see if it is able to run on any of them. */ - UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; - BaseType_t xLowestPriority = pxPreviousTCB->uxPriority; - BaseType_t xLowestPriorityCore = -1; - - if( pxPreviousTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) { - xLowestPriority = xLowestPriority - 1; - } + /* A ready task was just evicted from this core. See if it can be + * scheduled on any other core. */ + UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; + BaseType_t xLowestPriority = pxPreviousTCB->uxPriority; + BaseType_t xLowestPriorityCore = -1; - if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) - { - /* The ready task that was removed from this core is not excluded from it. - * Only look at the intersection of the cores the removed task is allowed to run - * on with the cores that the new task is excluded from. It is possible that the - * new task was only placed onto this core because it is excluded from another. - * Check to see if the previous task could run on one of those cores. */ - uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); - } - else - { - /* The ready task that was removed from this core is excluded from it. */ - } + if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + { + xLowestPriority = xLowestPriority - 1; + } - uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); + if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) + { + /* The ready task that was removed from this core is not excluded from it. + * Only look at the intersection of the cores the removed task is allowed to run + * on with the cores that the new task is excluded from. It is possible that the + * new task was only placed onto this core because it is excluded from another. + * Check to see if the previous task could run on one of those cores. 
*/ + uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); + } + else + { + /* The ready task that was removed from this core is excluded from it. */ + } - while( uxCoreMap != 0 ) - { - uint32_t uxCore; - BaseType_t xTaskPriority; + uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); - uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); - configASSERT( taskVALID_CORE_ID( uxCore ) ); + while( uxCoreMap != 0 ) + { + uint32_t uxCore; + BaseType_t xTaskPriority; - xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; + uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); + configASSERT( taskVALID_CORE_ID( uxCore ) ); - if( pxCurrentTCBs[ uxCore ]->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) - { - xTaskPriority = xTaskPriority - ( BaseType_t ) 1; - } + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; - uxCoreMap &= ~( 1 << uxCore ); + if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + { + xTaskPriority = xTaskPriority - ( BaseType_t ) 1; + } - if( ( xTaskPriority < xLowestPriority ) && ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && ( xYieldPendings[ uxCore ] == pdFALSE ) ) - { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) - #endif + uxCoreMap &= ~( 1 << uxCore ); + + if( ( xTaskPriority < xLowestPriority ) && + ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && + ( xYieldPendings[ uxCore ] == pdFALSE ) ) { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = uxCore; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = uxCore; + } } } - } - if( taskVALID_CORE_ID( xLowestPriorityCore ) ) - { - prvYieldCore( xLowestPriorityCore ); + if( taskVALID_CORE_ID( xLowestPriorityCore ) ) + { + prvYieldCore( xLowestPriorityCore ); + } } } - #endif /* if ( configUSE_CORE_AFFINITY == 1 ) */ + #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */ + } return xTaskScheduled; } @@ -1151,7 +1127,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer, UBaseType_t uxCoreAffinityMask ) - #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ { TCB_t * pxNewTCB; TaskHandle_t xReturn; @@ -1220,7 +1196,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, UBaseType_t uxCoreAffinityMask, TaskHandle_t * pxCreatedTask ) - #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ { TCB_t * pxNewTCB; BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; @@ -1257,7 +1233,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { - /* Set the task's affinity before scheduling it */ + /* Set the task's affinity before scheduling it. 
*/ pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; } #endif @@ -1284,7 +1260,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, UBaseType_t uxCoreAffinityMask, TaskHandle_t * pxCreatedTask ) - #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ { TCB_t * pxNewTCB; BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; @@ -1324,7 +1300,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { - /* Set the task's affinity before scheduling it */ + /* Set the task's affinity before scheduling it. */ pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; } #endif @@ -1360,7 +1336,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, UBaseType_t uxPriority, UBaseType_t uxCoreAffinityMask, TaskHandle_t * const pxCreatedTask ) - #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ { TCB_t * pxNewTCB; BaseType_t xReturn; @@ -1439,7 +1415,7 @@ static BaseType_t prvYieldForTask( TCB_t * pxTCB, #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { - /* Set the task's affinity before scheduling it */ + /* Set the task's affinity before scheduling it. */ pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; } #endif @@ -1667,24 +1643,18 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* portUSING_MPU_WRAPPERS */ - /* Initialize to not running. */ + /* Initialize task state and task attributes. */ #if ( configNUM_CORES > 1 ) + { pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; /* Is this an idle task? */ - if( pxTaskCode == prvIdleTask ) + if( ( pxTaskCode == prvIdleTask ) || ( pxTaskCode == prvMinimalIdleTask ) ) { - pxNewTCB->xTaskAttribute = taskATTRIBUTE_IS_IDLE; + pxNewTCB->uxTaskAttributes |= taskATTRIBUTE_IS_IDLE; } - else if( pxTaskCode == prvMinimalIdleTask ) - { - pxNewTCB->xTaskAttribute = taskATTRIBUTE_IS_IDLE; - } - else - { - pxNewTCB->xTaskAttribute = 0; - } - #endif /* if ( configNUM_CORES > 1 ) */ + } + #endif /* #if ( configNUM_CORES > 1 ) */ if( pxCreatedTask != NULL ) { @@ -1699,6 +1669,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } /*-----------------------------------------------------------*/ +#if ( configNUM_CORES == 1 ) + static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { /* Ensure interrupts don't access the task lists while the lists are being @@ -1707,10 +1679,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { uxCurrentNumberOfTasks++; - #if ( configNUM_CORES > 1 ) - if( xSchedulerRunning == pdFALSE ) - #endif + if( pxCurrentTCB == NULL ) { + /* There are no other tasks, or all the other tasks are in + * the suspended state - make this the current task. */ + pxCurrentTCB = pxNewTCB; + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) { /* This is the first task to be created so do the preliminary @@ -1722,45 +1696,111 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { mtCOVERAGE_TEST_MARKER(); } - - #if ( configNUM_CORES == 1 ) - if( pxCurrentTCB == NULL ) + } + else + { + /* If the scheduler is not already running, make this task the + * current task if it is the highest priority task to be created + * so far. 
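For orientation, the configuration options this patch keys off can be gathered in FreeRTOSConfig.h roughly as below. The values are examples only; the defaults and exact semantics are set by FreeRTOS.h and the port:

    /* Example FreeRTOSConfig.h fragment for a dual-core SMP build. */
    #define configNUM_CORES                      2    /* number of cores the scheduler manages */
    #define configRUN_MULTIPLE_PRIORITIES        1    /* 0 = only tasks of equal priority run concurrently */
    #define configUSE_CORE_AFFINITY              1    /* enables uxCoreAffinityMask and the affinity APIs */
    #define configUSE_TASK_PREEMPTION_DISABLE    0    /* per-task xPreemptionDisable support */
    #define configUSE_MINIMAL_IDLE_HOOK          0    /* call vApplicationMinimalIdleHook() from the
                                                       * minimal idle tasks on cores 1..(N-1) */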
*/ + if( xSchedulerRunning == pdFALSE ) + { + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) { - /* There are no other tasks, or all the other tasks are in - * the suspended state - make this the current task. */ pxCurrentTCB = pxNewTCB; } else { - if( xSchedulerRunning == pdFALSE ) - { - if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) - { - pxCurrentTCB = pxNewTCB; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } + mtCOVERAGE_TEST_MARKER(); } - #else /* if ( configNUM_CORES == 1 ) */ - if( pxNewTCB->xTaskAttribute & taskATTRIBUTE_IS_IDLE ) - { - BaseType_t xCoreID; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + uxTaskNumber++; + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. */ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); - /* Check if a core is free. */ - for( xCoreID = ( UBaseType_t ) 0; xCoreID < ( UBaseType_t ) configNUM_CORES; xCoreID++ ) + prvAddTaskToReadyList( pxNewTCB ); + + portSETUP_TCB( pxNewTCB ); + } + taskEXIT_CRITICAL(); + + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than the current task + * then it should run now. */ + if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} + +#else /* #if ( configNUM_CORES == 1 ) */ + +static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) +{ + /* Ensure interrupts don't access the task lists while the lists are being + * updated. */ + taskENTER_CRITICAL(); + { + uxCurrentNumberOfTasks++; + + if( xSchedulerRunning == pdFALSE ) + { + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + * initialisation required. We will not recover if this call + * fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( pxNewTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + { + BaseType_t xCoreID; + + /* Check if a core is free. */ + for( xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ ) + { + if( pxCurrentTCBs[ xCoreID ] == NULL ) { - if( pxCurrentTCBs[ xCoreID ] == NULL ) - { - pxNewTCB->xTaskRunState = xCoreID; - pxCurrentTCBs[ xCoreID ] = pxNewTCB; - break; - } + pxNewTCB->xTaskRunState = xCoreID; + pxCurrentTCBs[ xCoreID ] = pxNewTCB; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); } } - #endif /* if ( configNUM_CORES == 1 ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } uxTaskNumber++; @@ -1783,7 +1823,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * currently running task and preemption is on then it should * run now. */ #if ( configUSE_PREEMPTION == 1 ) - ( void ) prvYieldForTask( pxNewTCB, pdFALSE, pdTRUE ); + prvYieldForTask( pxNewTCB, pdFALSE ); #endif } else @@ -1793,6 +1833,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } taskEXIT_CRITICAL(); } + +#endif /* #if ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ #if ( INCLUDE_vTaskDelete == 1 ) @@ -1800,7 +1842,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskDelete( TaskHandle_t xTaskToDelete ) { TCB_t * pxTCB; - TaskRunning_t xTaskRunningOnCore; taskENTER_CRITICAL(); { @@ -1808,16 +1849,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * being deleted. 
*/ pxTCB = prvGetTCBFromHandle( xTaskToDelete ); - #if ( configNUM_CORES == 1 ) - { - xTaskRunningOnCore = ( TaskRunning_t ) 0; - } - #else - { - xTaskRunningOnCore = pxTCB->xTaskRunState; - } - #endif - /* Remove task from the ready/delayed list. */ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { @@ -1847,7 +1878,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* If the task is running (or yielding), we must add it to the * termination list so that an idle task can delete it when it is * no longer running. */ - if( taskTASK_IS_RUNNING( pxTCB ) || taskTASK_IS_YIELDING( pxTCB ) ) + #if ( configNUM_CORES == 1 ) + if( pxTCB == pxCurrentTCB ) + #else + if( pxTCB->xTaskRunState != taskTASK_NOT_RUNNING ) + #endif { /* A running task is being deleted. This cannot complete within the * task itself, as a context switch to another task is required. @@ -1870,7 +1905,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * after which it is not possible to yield away from this task - * hence xYieldPending is used to latch that a context switch is * required. */ - portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ xTaskRunningOnCore ] ); + #if ( configNUM_CORES == 1 ) + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ 0 ] ); + #else + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ pxTCB->xTaskRunState ] ); + #endif } else { @@ -1882,44 +1921,61 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvResetNextTaskUnblockTime(); } } + #if ( configNUM_CORES == 1 ) + { taskEXIT_CRITICAL(); - #endif - /* If the task is not deleting itself, call prvDeleteTCB from outside of - * critical section. If a task deletes itself, prvDeleteTCB is called - * from prvCheckTasksWaitingTermination which is called from Idle task. */ - if( ( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) && ( taskTASK_IS_YIELDING( pxTCB ) == pdFALSE ) ) - { - prvDeleteTCB( pxTCB ); - } + /* If the task is not deleting itself, call prvDeleteTCB from outside of + * critical section. If a task deletes itself, prvDeleteTCB is called + * from prvCheckTasksWaitingTermination which is called from Idle task. */ + if( pxTCB != pxCurrentTCB ) + { + prvDeleteTCB( pxTCB ); + } - /* Force a reschedule if the task that has just been deleted was running. */ - if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) ) ) + /* Force a reschedule if it is the currently running task that has just + * been deleted. */ + if( xSchedulerRunning != pdFALSE ) + { + if( pxTCB == pxCurrentTCB ) + { + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* #if ( configNUM_CORES == 1 ) */ { - #if ( configNUM_CORES == 1 ) + /* If a running task is not deleting itself, call prvDeleteTCB. If a running + * task deletes itself, prvDeleteTCB is called from prvCheckTasksWaitingTermination + * which is called from Idle task. */ + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { - configASSERT( uxSchedulerSuspended == 0 ); - portYIELD_WITHIN_API(); + prvDeleteTCB( pxTCB ); } - #else + + /* Force a reschedule if the task that has just been deleted was running. 
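Behaviourally, the branch above means that deleting a task that is still running (on this or another core) only marks it for clean-up; the memory is reclaimed later by an idle task via prvCheckTasksWaitingTermination(). A small usage sketch (the handle and wrapper are hypothetical):

    /* Hypothetical helper: stop a worker task from some other task. */
    void vExampleStopWorker( TaskHandle_t xWorker )
    {
        /* Returns immediately.  If xWorker is currently running on a core,
         * its TCB and stack are freed later by an idle task rather than here. */
        vTaskDelete( xWorker );
    }

    /* A task deleting itself never returns from the call: vTaskDelete( NULL ); */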
*/ + if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) ) ) { - if( xTaskRunningOnCore == portGET_CORE_ID() ) + if( pxTCB->xTaskRunState == portGET_CORE_ID() ) { configASSERT( uxSchedulerSuspended == 0 ); vTaskYieldWithinAPI(); } else { - prvYieldCore( xTaskRunningOnCore ); + prvYieldCore( pxTCB->xTaskRunState ); } } - #endif /* if ( configNUM_CORES == 1 ) */ - } - #if ( configNUM_CORES > 1 ) taskEXIT_CRITICAL(); - #endif + } + #endif /* #if ( configNUM_CORES == 1 ) */ } #endif /* INCLUDE_vTaskDelete */ @@ -2000,7 +2056,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * have put ourselves to sleep. */ if( xAlreadyYielded == pdFALSE ) { - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + portYIELD_WITHIN_API(); + #else + vTaskYieldWithinAPI(); + #endif } else { @@ -2024,7 +2084,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { vTaskSuspendAll(); { - /* Move the assert inside since there can be multiple cores running. */ configASSERT( uxSchedulerSuspended == 1 ); traceTASK_DELAY(); @@ -2049,7 +2108,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * have put ourselves to sleep. */ if( xAlreadyYielded == pdFALSE ) { - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + portYIELD_WITHIN_API(); + #else + vTaskYieldWithinAPI(); + #endif } else { @@ -2073,7 +2136,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxTCB ); #if ( configNUM_CORES == 1 ) - if( taskTASK_IS_RUNNING( pxTCB ) ) + if( pxTCB == pxCurrentTCB ) { /* The task calling this function is querying its own state. */ eReturn = eRunning; @@ -2149,19 +2212,27 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */ { - /* If the task is not in any other state, it must be in the - * Ready (including pending ready) state. */ - #if ( configNUM_CORES > 1 ) + #if ( configNUM_CORES == 1 ) + { + /* If the task is not in any other state, it must be in the + * Ready (including pending ready) state. */ + eReturn = eReady; + } + #else /* #if ( configNUM_CORES == 1 ) */ + { if( taskTASK_IS_RUNNING( pxTCB ) ) { /* Is it actively running on a core? */ eReturn = eRunning; } else - #endif - { - eReturn = eReady; + { + /* If the task is not in any other state, it must be in the + * Ready (including pending ready) state. */ + eReturn = eReady; + } } + #endif /* #if ( configNUM_CORES == 1 ) */ } } @@ -2241,7 +2312,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) TCB_t * pxTCB; UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; BaseType_t xYieldRequired = pdFALSE; + #if ( configNUM_CORES > 1 ) BaseType_t xYieldForTask = pdFALSE; + #endif configASSERT( uxNewPriority < configMAX_PRIORITIES ); @@ -2280,13 +2353,35 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( uxNewPriority > uxCurrentBasePriority ) { #if ( configNUM_CORES == 1 ) - if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) - #endif + { + if( pxTCB != pxCurrentTCB ) + { + /* The priority of a task other than the currently + * running task is being raised. Is the priority being + * raised above that of the running task? */ + if( uxNewPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The priority of the running task is being raised, + * but the running task must already be the highest + * priority task able to run so no yield is required. 
*/ + } + } + #else /* #if ( configNUM_CORES == 1 ) */ { /* The priority of a task is being raised so * perform a yield for this task later. */ xYieldForTask = pdTRUE; } + #endif /* #if ( configNUM_CORES == 1 ) */ } else if( taskTASK_IS_RUNNING( pxTCB ) ) { @@ -2370,50 +2465,51 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } else { - #if ( configNUM_CORES > 1 ) - + #if ( configNUM_CORES == 1 ) + { + mtCOVERAGE_TEST_MARKER(); + } + #else + { /* It's possible that xYieldForTask was already set to pdTRUE because * its priority is being raised. However, since it is not in a ready list * we don't actually need to yield for it. */ xYieldForTask = pdFALSE; + } #endif } - #if ( configUSE_PREEMPTION == 1 ) + #if ( configNUM_CORES == 1 ) { if( xYieldRequired != pdFALSE ) { - #if ( configNUM_CORES == 1 ) - { - taskYIELD_IF_USING_PREEMPTION(); - } - #else - { - prvYieldCore( ( BaseType_t ) pxTCB->xTaskRunState ); - } - #endif + taskYIELD_IF_USING_PREEMPTION(); } - else if( xYieldForTask != pdFALSE ) + else { - #if ( configNUM_CORES == 1 ) + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + #if ( configUSE_PREEMPTION == 1 ) + { + if( xYieldRequired != pdFALSE ) { - if( uxNewPriority >= pxCurrentTCB->uxPriority ) - { - taskYIELD_IF_USING_PREEMPTION(); - } + prvYieldCore( pxTCB->xTaskRunState ); } - #else + else if( xYieldForTask != pdFALSE ) { - ( void ) prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); + prvYieldForTask( pxTCB, pdTRUE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); } - #endif - } - else - { - mtCOVERAGE_TEST_MARKER(); } + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } - #endif /* if ( configUSE_PREEMPTION == 1 ) */ + #endif /* #if ( configNUM_CORES == 1 ) */ /* Remove compiler warning about unused variables when the port * optimised task selection is not being used. 
*/ @@ -2426,7 +2522,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* INCLUDE_vTaskPrioritySet */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) +#if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) void vTaskCoreAffinitySet( const TaskHandle_t xTask, UBaseType_t uxCoreAffinityMask ) { @@ -2454,10 +2550,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } taskEXIT_CRITICAL(); } -#endif /* if ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ +#endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) +#if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) { TCB_t * pxTCB; @@ -2472,7 +2568,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) return uxCoreAffinityMask; } -#endif /* if ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ +#endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ /*-----------------------------------------------------------*/ @@ -2491,7 +2587,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) taskEXIT_CRITICAL(); } -#endif /* configUSE_TASK_PREEMPTION_DISABLE */ +#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ /*-----------------------------------------------------------*/ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -2519,7 +2615,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) taskEXIT_CRITICAL(); } -#endif /* configUSE_TASK_PREEMPTION_DISABLE */ +#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ /*-----------------------------------------------------------*/ #if ( INCLUDE_vTaskSuspend == 1 ) @@ -2527,7 +2623,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskSuspend( TaskHandle_t xTaskToSuspend ) { TCB_t * pxTCB; - TaskRunning_t xTaskRunningOnCore; + #if ( configNUM_CORES > 1 ) + TaskRunning_t xTaskRunningOnCore; + #endif taskENTER_CRITICAL(); { @@ -2537,9 +2635,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) traceTASK_SUSPEND( pxTCB ); - #if ( configNUM_CORES == 1 ) - xTaskRunningOnCore = ( TaskRunning_t ) 0; - #else + #if ( configNUM_CORES > 1 ) xTaskRunningOnCore = pxTCB->xTaskRunState; #endif @@ -2583,43 +2679,35 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ } - if( xSchedulerRunning != pdFALSE ) - { - /* Reset the next expected unblock time in case it referred to the - * task that is now in the Suspended state. */ - prvResetNextTaskUnblockTime(); - } - else + #if ( configNUM_CORES == 1 ) { - mtCOVERAGE_TEST_MARKER(); - } + taskEXIT_CRITICAL(); - if( taskTASK_IS_RUNNING( pxTCB ) ) - { if( xSchedulerRunning != pdFALSE ) { - if( xTaskRunningOnCore == portGET_CORE_ID() ) - { - /* The current task has just been suspended. */ - configASSERT( uxSchedulerSuspended == 0 ); - #if ( portCRITICAL_NESTING_IN_TCB == 1 ) - vTaskYieldWithinAPI(); - #else - portYIELD_WITHIN_API(); - #endif - } - else + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. 
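A short usage sketch for the affinity accessors above. The task handle is hypothetical; bit n of the mask corresponds to core n, matching the ( 1 << xCoreID ) tests used elsewhere in this patch:

    /* Pin an existing task to cores 0 and 2, then read the mask back. */
    void vExampleConfigureAffinity( TaskHandle_t xWorker )
    {
        UBaseType_t uxMask;

        vTaskCoreAffinitySet( xWorker, ( 1U << 0 ) | ( 1U << 2 ) );

        uxMask = vTaskCoreAffinityGet( xWorker );
        configASSERT( uxMask == ( ( 1U << 0 ) | ( 1U << 2 ) ) );
    }

Both accessors take the kernel critical section internally, so they are intended to be called from task context rather than from an ISR.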
*/ + taskENTER_CRITICAL(); { - prvYieldCore( xTaskRunningOnCore ); + prvResetNextTaskUnblockTime(); } - taskEXIT_CRITICAL(); } else { - taskEXIT_CRITICAL(); - #if ( configNUM_CORES == 1 ) + mtCOVERAGE_TEST_MARKER(); + } + if( pxTCB == pxCurrentTCB ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { /* The scheduler is not running, but the task that was pointed * to by pxCurrentTCB has just been suspended and pxCurrentTCB * must be adjusted to point to a different task. */ @@ -2635,7 +2723,48 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { vTaskSwitchContext(); } - #else /* if ( configNUM_CORES == 1 ) */ + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + if( xSchedulerRunning != pdFALSE ) + { + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. */ + prvResetNextTaskUnblockTime(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( taskTASK_IS_RUNNING( pxTCB ) ) + { + if( xSchedulerRunning != pdFALSE ) + { + if( xTaskRunningOnCore == portGET_CORE_ID() ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( xTaskRunningOnCore ); + } + + taskEXIT_CRITICAL(); + } + else + { + taskEXIT_CRITICAL(); + + configASSERT( pxTCB == pxCurrentTCBs[ xTaskRunningOnCore ] ); /* The scheduler is not running, but the task that was pointed * to by pxCurrentTCB has just been suspended and pxCurrentTCB @@ -2660,13 +2789,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; } } - #endif /* if ( configNUM_CORES == 1 ) */ + } } - } - else - { - taskEXIT_CRITICAL(); - } + else + { + taskEXIT_CRITICAL(); + } + } /* taskEXIT_CRITICAL() - already exited in one of three cases above. */ + #endif /* #if ( configNUM_CORES == 1 ) */ } #endif /* INCLUDE_vTaskSuspend */ @@ -2731,14 +2861,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. */ - if( ( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) && ( pxTCB != NULL ) ) + if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) #else /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. It is also impossible to resume a task - * that is actively running on another core but it is too dangerous - * to check their run state here. Safer to get into a critical section - * and check if it is actually suspended or not below. */ + * that is actively running on another core but it is not safe + * to check their run state here. Therefore, we get into a critical + * section and check if the task is actually suspended or not. */ if( pxTCB != NULL ) #endif { @@ -2753,12 +2883,30 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); - /* Check if Yield is required for this Task in prvYieldForTask. */ - #if ( configUSE_PREEMPTION == 1 ) + #if ( configNUM_CORES == 1 ) { - ( void ) prvYieldForTask( pxTCB, pdTRUE, pdTRUE ); + /* A higher priority task may have just been resumed. 
*/ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + /* This yield may not cause the task just resumed to run, + * but will leave the lists in the correct state for the + * next yield. */ + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxTCB, pdTRUE ); + } + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } - #endif + #endif /* #if ( configNUM_CORES == 1 ) */ } else { @@ -2782,7 +2930,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) { BaseType_t xYieldRequired = pdFALSE; - BaseType_t xYieldCoreID; TCB_t * const pxTCB = xTaskToResume; UBaseType_t uxSavedInterruptStatus; @@ -2815,24 +2962,25 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* Check the ready lists can be accessed. */ if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { - /* Ready lists can be accessed so move the task from the - * suspended list to the ready list directly. */ - - /* Check if yield is required for this task in prvYieldForTask. */ - xYieldCoreID = prvYieldForTask( pxTCB, pdTRUE, pdFALSE ); - - if( taskVALID_CORE_ID( xYieldCoreID ) ) - { - /* Mark that a yield is pending in case the user is not - * using the return value to initiate a context switch - * from the ISR using portYIELD_FROM_ISR. */ - xYieldPendings[ xYieldCoreID ] = pdTRUE; - xYieldRequired = pdTRUE; - } - else + #if ( configNUM_CORES == 1 ) { - mtCOVERAGE_TEST_MARKER(); + /* Ready lists can be accessed so move the task from the + * suspended list to the ready list directly. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + + /* Mark that a yield is pending in case the user is not + * using the return value to initiate a context switch + * from the ISR using portYIELD_FROM_ISR. */ + xYieldPendings[ 0 ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } + #endif /* #if ( configNUM_CORES == 1 ) */ ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); @@ -2844,6 +2992,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * unsuspended. */ vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); } + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) + { + prvYieldForTask( pxTCB, pdTRUE ); + + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + { + xYieldRequired = pdTRUE; + } + } + #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */ } else { @@ -2895,13 +3054,14 @@ static BaseType_t prvCreateIdleTasks( void ) } } - /* Append the idle task number to the end of the name if there is space */ + /* Append the idle task number to the end of the name if there is space. */ #if ( configNUM_CORES > 1 ) + { if( x < configMAX_TASK_NAME_LEN ) { cIdleName[ x++ ] = ( char ) xCoreID + '0'; - /* And append a null character if there is space */ + /* And append a null character if there is space. 
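The return value handling above keeps the usual "FromISR" pattern working on SMP as well: the ISR still requests a switch on its own core, while the kernel may additionally have asked other cores to yield through prvYieldForTask(). A minimal caller sketch (the handler name and handle are hypothetical):

    extern TaskHandle_t xResumedTask;    /* hypothetical handle, created and suspended elsewhere */

    void vExampleInterruptHandler( void )
    {
        BaseType_t xYieldRequired;

        xYieldRequired = xTaskResumeFromISR( xResumedTask );

        /* Request a context switch on this core before the ISR returns if the
         * resumed task should run here now. */
        portYIELD_FROM_ISR( xYieldRequired );
    }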
*/ if( x < configMAX_TASK_NAME_LEN ) { cIdleName[ x ] = '\0'; @@ -2915,7 +3075,8 @@ static BaseType_t prvCreateIdleTasks( void ) { mtCOVERAGE_TEST_MARKER(); } - #endif /* ( configNUM_CORES > 1 ) */ + } + #endif /* #if ( configNUM_CORES > 1 ) */ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) { @@ -2951,7 +3112,7 @@ static BaseType_t prvCreateIdleTasks( void ) xIdleTaskStackBuffers[ xCoreID - 1 ], &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } - #endif /* if ( configNUM_CORES > 1 ) */ + #endif /* #if ( configNUM_CORES > 1 ) */ if( xIdleTaskHandles[ xCoreID ] != NULL ) { @@ -2985,7 +3146,7 @@ static BaseType_t prvCreateIdleTasks( void ) portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } - #endif + #endif /* #if ( configNUM_CORES > 1 ) */ } #endif /* configSUPPORT_STATIC_ALLOCATION */ } @@ -3115,16 +3276,16 @@ void vTaskSuspendAll( void ) * the above increment elsewhere. */ portMEMORY_BARRIER(); } - #else /* ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUM_CORES == 1 ) */ { UBaseType_t ulState; - /* This must only be called from within a task */ + /* This must only be called from within a task. */ portASSERT_IF_IN_ISR(); if( xSchedulerRunning != pdFALSE ) { - /* writes to uxSchedulerSuspended must be protected by both the task AND ISR locks. + /* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks. * We must disable interrupts before we grab the locks in the event that this task is * interrupted and switches context before incrementing uxSchedulerSuspended. * It is safe to re-enable interrupts after releasing the ISR lock and incrementing @@ -3166,7 +3327,7 @@ void vTaskSuspendAll( void ) mtCOVERAGE_TEST_MARKER(); } } - #endif /* ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUM_CORES == 1 ) */ } /*----------------------------------------------------------*/ @@ -3240,9 +3401,6 @@ BaseType_t xTaskResumeAll( void ) BaseType_t xAlreadyYielded = pdFALSE; #if ( configNUM_CORES > 1 ) - - /* Scheduler running status is not checked in vTaskSuspendAll in single - * core implementation. This condition is only required for multiple cores. */ if( xSchedulerRunning != pdFALSE ) #endif { @@ -3254,7 +3412,6 @@ BaseType_t xTaskResumeAll( void ) taskENTER_CRITICAL(); { BaseType_t xCoreID; - xCoreID = portGET_CORE_ID(); /* If uxSchedulerSuspended is zero then this function does not match a @@ -3291,12 +3448,13 @@ BaseType_t xTaskResumeAll( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* if ( configNUM_CORES == 1 ) */ - + #else /* #if ( configNUM_CORES == 1 ) */ + { /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. * If the current core yielded then vTaskSwitchContext() has already been called * which sets xYieldPendings for the current core to pdTRUE. */ - #endif /* if ( configNUM_CORES == 1 ) */ + } + #endif /* #if ( configNUM_CORES == 1 ) */ } if( pxTCB != NULL ) @@ -3328,7 +3486,7 @@ BaseType_t xTaskResumeAll( void ) { if( xTaskIncrementTick() != pdFALSE ) { - /* other cores are interrupted from + /* Other cores are interrupted from * within xTaskIncrementTick(). 
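As a worked example of the name construction above: with three cores, a base idle task name of "IDLE" (an assumption here - the base name comes from the existing single core code) and a configMAX_TASK_NAME_LEN large enough for the digit and terminator, the idle tasks end up named "IDLE0", "IDLE1" and "IDLE2". Appending ( char ) xCoreID + '0' produces a single character, so the scheme assumes core IDs in the range 0-9, consistent with a small fixed configNUM_CORES:

    /* Sketch of the naming step for core 2, with x == 4 ("IDLE" already copied). */
    if( x < configMAX_TASK_NAME_LEN )
    {
        cIdleName[ x++ ] = ( char ) 2 + '0';    /* "IDLE" -> "IDLE2" */
    }

    if( x < configMAX_TASK_NAME_LEN )
    {
        cIdleName[ x ] = '\0';
    }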
*/ xYieldPendings[ xCoreID ] = pdTRUE; } @@ -3354,13 +3512,13 @@ BaseType_t xTaskResumeAll( void ) { xAlreadyYielded = pdTRUE; } - #endif + #endif /* #if ( configUSE_PREEMPTION != 0 ) */ #if ( configNUM_CORES == 1 ) { taskYIELD_IF_USING_PREEMPTION(); } - #endif /* ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUM_CORES == 1 ) */ } else { @@ -3787,25 +3945,33 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * switch if preemption is turned off. */ #if ( configUSE_PREEMPTION == 1 ) { - taskENTER_CRITICAL(); + #if ( configNUM_CORES == 1 ) { - BaseType_t xYieldCoreID; - - xYieldCoreID = prvYieldForTask( pxTCB, pdFALSE, pdFALSE ); - - /* Preemption is on, but a context switch should only be - * performed if the unblocked task has a priority that is - * higher than the currently executing task. */ - if( taskVALID_CORE_ID( xYieldCoreID ) ) + /* Preemption is on, but a context switch should only be + * performed if the unblocked task has a priority that is + * higher than the currently executing task. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { /* Pend the yield to be performed when the scheduler - * is unsuspended. */ - xYieldPendings[ xYieldCoreID ] = pdTRUE; + * is unsuspended. */ + xYieldPendings[ 0 ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + #else /* #if ( configNUM_CORES == 1 ) */ + { + taskENTER_CRITICAL(); + { + prvYieldForTask( pxTCB, pdFALSE ); + } + taskEXIT_CRITICAL(); + } + #endif /* #if ( configNUM_CORES == 1 ) */ } - #endif /* configUSE_PREEMPTION */ + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } else { @@ -3828,8 +3994,8 @@ BaseType_t xTaskIncrementTick( void ) #if ( configUSE_PREEMPTION == 1 ) UBaseType_t x; - BaseType_t xCoreYieldList[ configNUM_CORES ] = { pdFALSE }; - #endif /* configUSE_PREEMPTION */ + BaseType_t xYieldRequiredForCore[ configNUM_CORES ] = { pdFALSE }; + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ #if ( configNUM_CORES > 1 ) taskENTER_CRITICAL(); @@ -3927,16 +4093,28 @@ BaseType_t xTaskIncrementTick( void ) * context switch if preemption is turned off. */ #if ( configUSE_PREEMPTION == 1 ) { - BaseType_t xYieldCoreID; - - xYieldCoreID = prvYieldForTask( pxTCB, pdTRUE, pdFALSE ); - - if( taskVALID_CORE_ID( xYieldCoreID ) ) + #if( configNUM_CORES == 1 ) + { + /* Preemption is on, but a context switch should + * only be performed if the unblocked task has a + * priority that is equal to or higher than the + * currently executing task. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if( configNUM_CORES == 1 ) */ { - xCoreYieldList[ xYieldCoreID ] = pdTRUE; + prvYieldForTask( pxTCB, pdTRUE ); } + #endif /* #if( configNUM_CORES == 1 ) */ } - #endif /* configUSE_PREEMPTION */ + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } } } @@ -3946,40 +4124,19 @@ BaseType_t xTaskIncrementTick( void ) * writer has not explicitly turned time slicing off. 
*/ #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { - #if ( configNUM_CORES == 1 ) + for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { - xSwitchRequired = pdTRUE; + xYieldRequiredForCore[ x ] = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } - #else /* if ( configNUM_CORES == 1 ) */ - { - /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not - * force a context switch that would just shuffle tasks around cores */ - - /* TODO: There are certainly better ways of doing this that would reduce - * the number of interrupts and also potentially help prevent tasks from - * moving between cores as often. This, however, works for now. */ - for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) - { - xCoreYieldList[ x ] = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - } - #endif /* ( configNUM_CORES == 1 ) */ } - #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ #if ( configUSE_TICK_HOOK == 1 ) { @@ -4002,22 +4159,18 @@ BaseType_t xTaskIncrementTick( void ) { if( xYieldPendings[ x ] != pdFALSE ) { - xCoreYieldList[ x ] = pdTRUE; + xYieldRequiredForCore[ x ] = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } - } - #endif /* configUSE_PREEMPTION */ - #if ( configUSE_PREEMPTION == 1 ) - { #if ( configNUM_CORES == 1 ) { /* For single core the core ID is always 0. */ - if( xCoreYieldList[ 0 ] != pdFALSE ) + if( xYieldRequiredForCore[ 0 ] != pdFALSE ) { xSwitchRequired = pdTRUE; } @@ -4026,10 +4179,9 @@ BaseType_t xTaskIncrementTick( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUM_CORES == 1 ) */ { BaseType_t xCoreID; - xCoreID = portGET_CORE_ID(); for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) @@ -4038,7 +4190,7 @@ BaseType_t xTaskIncrementTick( void ) if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) #endif { - if( xCoreYieldList[ x ] != pdFALSE ) + if( xYieldRequiredForCore[ x ] != pdFALSE ) { if( x == ( UBaseType_t ) xCoreID ) { @@ -4056,9 +4208,9 @@ BaseType_t xTaskIncrementTick( void ) } } } - #endif /* ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUM_CORES == 1 ) */ } - #endif /* configUSE_PREEMPTION */ + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } else { @@ -4267,33 +4419,21 @@ BaseType_t xTaskIncrementTick( void ) } } #else /* if ( configNUM_CORES == 1 ) */ - void vTaskSwitchContext( void ) - { - BaseType_t xCoreID; - - xCoreID = portGET_CORE_ID(); - - vTaskSwitchContextForCore( xCoreID ); - } -#endif /* if ( configNUM_CORES == 1 ) */ -/*-----------------------------------------------------------*/ - -#if ( configNUM_CORES > 1 ) - void vTaskSwitchContextForCore( BaseType_t xCoreID ) + void vTaskSwitchContext( BaseType_t xCoreID ) { /* Acquire both locks: * - The ISR lock protects the ready list from simultaneous access by - * both other ISRs and tasks. + * both other ISRs and tasks. * - We also take the task lock to pause here in case another core has - * suspended the scheduler. 
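The time-slice pass above now makes a per-core decision: a core is asked to yield when the ready list at its running task's priority holds more than one task. Reduced to a pure function for illustration (array lookups stand in for the real ready-list queries; xPreemptionDisable handling and the later "own core versus other core" split are omitted):

    #define exampleNUM_CORES    4

    /* uxReadyAtPriority[ p ] = number of Ready-state tasks at priority p.
     * uxCorePriority[ c ]    = priority of the task currently running on core c. */
    static void vExampleTimeSlice( const UBaseType_t uxReadyAtPriority[],
                                   const UBaseType_t uxCorePriority[ exampleNUM_CORES ],
                                   BaseType_t xYieldRequiredForCore[ exampleNUM_CORES ] )
    {
        UBaseType_t x;

        for( x = 0; x < exampleNUM_CORES; x++ )
        {
            if( uxReadyAtPriority[ uxCorePriority[ x ] ] > 1U )
            {
                xYieldRequiredForCore[ x ] = pdTRUE;    /* share the time slice on this core */
            }
        }
    }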
We don't want to simply set xYieldPending - * and move on if another core suspended the scheduler. We should only - * do that if the current core has suspended the scheduler. */ + * suspended the scheduler. We don't want to simply set xYieldPending + * and move on if another core suspended the scheduler. We should only + * do that if the current core has suspended the scheduler. */ - portGET_TASK_LOCK(); /* Must always acquire the task lock first */ + portGET_TASK_LOCK(); /* Must always acquire the task lock first. */ portGET_ISR_LOCK(); { /* vTaskSwitchContextForCore() must never be called from within a critical section. - * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. */ + * This is not necessarily true for single core FreeRTOS, but it is for this SMP port. */ #if ( portCRITICAL_NESTING_IN_TCB == 1 ) configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); #endif @@ -4347,8 +4487,7 @@ BaseType_t xTaskIncrementTick( void ) } #endif - /* Select a new task to run using either the generic C or port - * optimised asm code. */ + /* Select a new task to run. */ ( void ) prvSelectHighestPriorityTask( xCoreID ); traceTASK_SWITCHED_IN(); @@ -4464,7 +4603,6 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) { TCB_t * pxUnblockedTCB; BaseType_t xReturn; - BaseType_t xYieldCoreID; /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be * called from a critical section within an ISR. */ @@ -4509,27 +4647,40 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); } - xYieldCoreID = prvYieldForTask( pxUnblockedTCB, pdFALSE, pdFALSE ); - - if( taskVALID_CORE_ID( xYieldCoreID ) ) + #if ( configNUM_CORES == 1 ) { - /* Return true if the task removed from the event list has a higher - * priority than the calling task. This allows the calling task to know if - * it should force a context switch now. */ - xReturn = pdTRUE; + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Return true if the task removed from the event list has a higher + * priority than the calling task. This allows the calling task to know if + * it should force a context switch now. */ + xReturn = pdTRUE; - /* Mark that a yield is pending in case the user is not using the - * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ - #if ( configUSE_PREEMPTION == 1 ) + /* Mark that a yield is pending in case the user is not using the + * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ + xYieldPendings[ 0 ] = pdTRUE; + } + else { - xYieldPendings[ xYieldCoreID ] = pdTRUE; + xReturn = pdFALSE; } - #endif } - else + #else /* #if ( configNUM_CORES == 1 ) */ { xReturn = pdFALSE; + + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxUnblockedTCB, pdFALSE ); + + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + { + xReturn = pdTRUE; + } + } + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } + #endif /* #if ( configNUM_CORES == 1 ) */ return xReturn; } @@ -4539,7 +4690,6 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) { TCB_t * pxUnblockedTCB; - BaseType_t xYieldCoreID; /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by * the event flags implementation. 
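Note the SMP signature change visible above: vTaskSwitchContext() now takes the ID of the core whose context is being switched rather than deriving it internally. A hedged sketch of how a port's context-switch path might call it (the handler name is hypothetical and real ports differ in where this happens):

    /* Hypothetical port-layer handler that performs the pended context switch. */
    void vExamplePortSwitchHandler( void )
    {
        /* Each core switches only its own context, so it passes its own ID. */
        vTaskSwitchContext( portGET_CORE_ID() );
    }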
*/ @@ -4574,20 +4724,30 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); prvAddTaskToReadyList( pxUnblockedTCB ); - #if ( configUSE_PREEMPTION == 1 ) + #if ( configNUM_CORES == 1 ) { - taskENTER_CRITICAL(); + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) { - xYieldCoreID = prvYieldForTask( pxUnblockedTCB, pdFALSE, pdFALSE ); - - if( taskVALID_CORE_ID( xYieldCoreID ) ) + /* The unblocked task has a priority above that of the calling task, so + * a context switch is required. This function is called with the + * scheduler suspended so xYieldPending is set so the context switch + * occurs immediately that the scheduler is resumed (unsuspended). */ + xYieldPendings[ 0 ] = pdTRUE; + } + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + #if ( configUSE_PREEMPTION == 1 ) + { + taskENTER_CRITICAL(); { - xYieldPendings[ xYieldCoreID ] = pdTRUE; + prvYieldForTask( pxUnblockedTCB, pdFALSE ); } + taskEXIT_CRITICAL(); } - taskEXIT_CRITICAL(); + #endif } - #endif /* ( configUSE_PREEMPTION == 1 ) */ + #endif /* #if ( configNUM_CORES == 1 ) */ } /*-----------------------------------------------------------*/ @@ -4678,7 +4838,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, void vTaskMissedYield( void ) { - /* Must be called from within a critical section */ + /* Must be called from within a critical section. */ xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } /*-----------------------------------------------------------*/ @@ -4721,16 +4881,21 @@ void vTaskMissedYield( void ) } #endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ /* * ----------------------------------------------------------- * The MinimalIdle task. * ---------------------------------------------------------- * - * The minimal idle task is used for all the additional Cores in a SMP system. - * There must be only 1 idle task and the rest are minimal idle tasks. + * The minimal idle task is used for all the additional cores in a SMP + * system. There must be only 1 idle task and the rest are minimal idle + * tasks. + * + * The portTASK_FUNCTION() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: * - * @todo additional conditional compiles to remove this function. + * void prvMinimalIdleTask( void *pvParameters ); */ #if ( configNUM_CORES > 1 ) @@ -4790,7 +4955,7 @@ void vTaskMissedYield( void ) #endif /* configUSE_MINIMAL_IDLE_HOOK */ } } -#endif /* if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUM_CORES > 1 ) */ /* * ----------------------------------------------------------- @@ -4922,7 +5087,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) } #endif /* configUSE_TICKLESS_IDLE */ - #if ( configNUM_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) { extern void vApplicationMinimalIdleHook( void ); @@ -4936,7 +5101,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) * CALL A FUNCTION THAT MIGHT BLOCK. 
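When configUSE_MINIMAL_IDLE_HOOK is 1 the minimal idle tasks call out to the application on every pass of their loop; the hook is declared extern by the kernel and must be supplied by the application. A minimal definition (the body is illustrative, and the "must never block" restriction spelled out in the surrounding comments applies):

    /* Supplied by the application when configUSE_MINIMAL_IDLE_HOOK == 1. */
    void vApplicationMinimalIdleHook( void )
    {
        /* Called from the minimal idle tasks and, on SMP builds, from the
         * main idle task as well.  Keep it short and never call a FreeRTOS
         * API that can block - e.g. feed a watchdog or count idle cycles. */
    }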
*/ vApplicationMinimalIdleHook(); } - #endif /* configUSE_MINIMAL_IDLE_HOOK */ + #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) */ } } /*-----------------------------------------------------------*/ @@ -5100,28 +5265,39 @@ static void prvCheckTasksWaitingTermination( void ) { taskENTER_CRITICAL(); { - /* For SMP, multiple idles can be running simultaneously - * and we need to check that other idles did not cleanup while we were - * waiting to enter the critical section. */ - if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + #if( configNUM_CORES == 1 ) { pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - - if( ( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) && ( taskTASK_IS_YIELDING( pxTCB ) == pdFALSE ) ) - { - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - --uxCurrentNumberOfTasks; - --uxDeletedTasksWaitingCleanUp; - } - else + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } + #else /* #if( configNUM_CORES == 1 ) */ + { + /* For SMP, multiple idles can be running simultaneously + * and we need to check that other idles did not cleanup while we were + * waiting to enter the critical section. */ + if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) { - /* The TCB to be deleted still has not yet been switched out - * by the scheduler, so we will just exit this loop early and - * try again next time. */ - taskEXIT_CRITICAL(); - break; + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } + else + { + /* The TCB to be deleted still has not yet been switched out + * by the scheduler, so we will just exit this loop early and + * try again next time. 
*/ + taskEXIT_CRITICAL(); + break; + } } } + #endif /* #if( configNUM_CORES == 1 ) */ } taskEXIT_CRITICAL(); @@ -5453,7 +5629,7 @@ static void prvResetNextTaskUnblockTime( void ) return xReturn; } - #else + #else /* #if ( configNUM_CORES == 1 ) */ TaskHandle_t xTaskGetCurrentTaskHandle( void ) { TaskHandle_t xReturn; @@ -5479,7 +5655,7 @@ static void prvResetNextTaskUnblockTime( void ) return xReturn; } - #endif /* if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUM_CORES == 1 ) */ #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ /*-----------------------------------------------------------*/ @@ -5496,7 +5672,9 @@ static void prvResetNextTaskUnblockTime( void ) } else { + #if ( configNUM_CORES > 1 ) taskENTER_CRITICAL(); + #endif { if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { @@ -5507,7 +5685,9 @@ static void prvResetNextTaskUnblockTime( void ) xReturn = taskSCHEDULER_SUSPENDED; } } + #if ( configNUM_CORES > 1 ) taskEXIT_CRITICAL(); + #endif } return xReturn; @@ -5792,17 +5972,11 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* configUSE_MUTEXES */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) - void vTaskYieldWithinAPI( void ) - { - portYIELD_WITHIN_API(); - } -#else - -/*If not in a critical section then yield immediately. - * Otherwise set xYieldPendings to true to wait to - * yield until exiting the critical section. - */ +#if ( configNUM_CORES > 1 ) + /* If not in a critical section then yield immediately. + * Otherwise set xYieldPendings to true to wait to + * yield until exiting the critical section. + */ void vTaskYieldWithinAPI( void ) { if( pxCurrentTCB->uxCriticalNesting == 0U ) @@ -5814,7 +5988,7 @@ static void prvResetNextTaskUnblockTime( void ) xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } } -#endif /* if ( configNUM_CORES == 1 ) */ +#endif /* #if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ @@ -5826,26 +6000,41 @@ static void prvResetNextTaskUnblockTime( void ) if( xSchedulerRunning != pdFALSE ) { - #if ( configNUM_CORES > 1 ) + #if ( configNUM_CORES == 1 ) + { + ( pxCurrentTCB->uxCriticalNesting )++; + + /* This is not the interrupt safe version of the enter critical + * function so assert() if it is being called from an interrupt + * context. Only API functions that end in "FromISR" can be used in an + * interrupt. Only assert if the critical nesting count is 1 to + * protect against recursive calls if the assert function also uses a + * critical section. */ + if( pxCurrentTCB->uxCriticalNesting == 1 ) + { + portASSERT_IF_IN_ISR(); + } + } + #else /* #if ( configNUM_CORES == 1 ) */ + { if( pxCurrentTCB->uxCriticalNesting == 0U ) { portGET_TASK_LOCK(); portGET_ISR_LOCK(); } - #endif - ( pxCurrentTCB->uxCriticalNesting )++; + ( pxCurrentTCB->uxCriticalNesting )++; + + /* This is not the interrupt safe version of the enter critical + * function so assert() if it is being called from an interrupt + * context. Only API functions that end in "FromISR" can be used in an + * interrupt. Only assert if the critical nesting count is 1 to + * protect against recursive calls if the assert function also uses a + * critical section. */ + if( pxCurrentTCB->uxCriticalNesting == 1 ) + { + portASSERT_IF_IN_ISR(); - /* This is not the interrupt safe version of the enter critical - * function so assert() if it is being called from an interrupt - * context. Only API functions that end in "FromISR" can be used in an - * interrupt. 
Only assert if the critical nesting count is 1 to - * protect against recursive calls if the assert function also uses a - * critical section. */ - if( pxCurrentTCB->uxCriticalNesting == 1 ) - { - portASSERT_IF_IN_ISR(); - #if ( configNUM_CORES > 1 ) if( uxSchedulerSuspended == 0U ) { /* The only time there would be a problem is if this is called @@ -5854,8 +6043,9 @@ static void prvResetNextTaskUnblockTime( void ) * used within vTaskSwitchContext(). */ prvCheckForRunStateChange(); } - #endif + } } + #endif /* #if ( configNUM_CORES == 1 ) */ } else { @@ -5867,7 +6057,7 @@ static void prvResetNextTaskUnblockTime( void ) /*-----------------------------------------------------------*/ -#if ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) ) UBaseType_t vTaskEnterCriticalFromISR( void ) { @@ -5892,7 +6082,7 @@ static void prvResetNextTaskUnblockTime( void ) return uxSavedInterruptStatus; } -#endif /* portCRITICAL_NESTING_IN_TCB */ +#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ #if ( portCRITICAL_NESTING_IN_TCB == 1 ) @@ -5915,7 +6105,11 @@ static void prvResetNextTaskUnblockTime( void ) if( pxCurrentTCB->uxCriticalNesting == 0U ) { - #if ( configNUM_CORES > 1 ) + #if ( configNUM_CORES == 1 ) + { + portENABLE_INTERRUPTS(); + } + #else { BaseType_t xYieldCurrentTask; @@ -5935,11 +6129,7 @@ static void prvResetNextTaskUnblockTime( void ) portYIELD(); } } - #else /* if ( configNUM_CORES > 1 ) */ - { - portENABLE_INTERRUPTS(); - } - #endif /* ( configNUM_CORES > 1 ) */ + #endif /* ( configNUM_CORES == 1 ) */ } else { @@ -5960,7 +6150,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* portCRITICAL_NESTING_IN_TCB */ /*-----------------------------------------------------------*/ -#if ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) ) void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) { @@ -6009,7 +6199,7 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* portCRITICAL_NESTING_IN_TCB */ +#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) @@ -6324,7 +6514,15 @@ TickType_t uxTaskResetEventItemValue( void ) * section (some will yield immediately, others wait until the * critical section exits) - but it is not something that * application code should ever do. */ - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else + { + vTaskYieldWithinAPI(); + } + #endif } else { @@ -6403,7 +6601,15 @@ TickType_t uxTaskResetEventItemValue( void ) * section (some will yield immediately, others wait until the * critical section exits) - but it is not something that * application code should ever do. */ - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else + { + vTaskYieldWithinAPI(); + } + #endif } else { @@ -6554,13 +6760,28 @@ TickType_t uxTaskResetEventItemValue( void ) } #endif - /* The notified task has a priority above the currently - * executing task so a yield is required. 
*/ - #if ( configUSE_PREEMPTION == 1 ) + #if ( configNUM_CORES == 1 ) + { + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + * executing task so a yield is required. */ + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configNUM_CORES == 1 ) */ { - ( void ) prvYieldForTask( pxTCB, pdFALSE, pdTRUE ); + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxTCB, pdFALSE ); + } + #endif } - #endif /* ( configUSE_PREEMPTION == 1 ) */ + #endif /* #if ( configNUM_CORES == 1 ) */ } else { @@ -6588,7 +6809,6 @@ TickType_t uxTaskResetEventItemValue( void ) uint8_t ucOriginalNotifyState; BaseType_t xReturn = pdPASS; UBaseType_t uxSavedInterruptStatus; - BaseType_t xYieldCoreId; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -6687,28 +6907,44 @@ TickType_t uxTaskResetEventItemValue( void ) listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); } - xYieldCoreId = prvYieldForTask( pxTCB, pdFALSE, pdFALSE ); - - if( taskVALID_CORE_ID( xYieldCoreId ) ) + #if ( configNUM_CORES == 1 ) { - if( pxHigherPriorityTaskWoken != NULL ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { - *pxHigherPriorityTaskWoken = pdTRUE; - } + /* The notified task has a priority above the currently + * executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } - /* Mark that a yield is pending in case the user is not - * using the "xHigherPriorityTaskWoken" parameter to an ISR - * safe FreeRTOS function. */ - #if ( configUSE_PREEMPTION == 1 ) + /* Mark that a yield is pending in case the user is not + * using the "xHigherPriorityTaskWoken" parameter to an ISR + * safe FreeRTOS function. */ + xYieldPendings[ 0 ] = pdTRUE; + } + else { - xYieldPendings[ xYieldCoreId ] = pdTRUE; + mtCOVERAGE_TEST_MARKER(); } - #endif /* ( configUSE_PREEMPTION == 1 ) */ } - else + #else /* #if ( configNUM_CORES == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxTCB, pdFALSE ); + + if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) + { + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + } + } + #endif } + #endif /* #if ( configNUM_CORES == 1 ) */ } } portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); @@ -6728,7 +6964,6 @@ TickType_t uxTaskResetEventItemValue( void ) TCB_t * pxTCB; uint8_t ucOriginalNotifyState; UBaseType_t uxSavedInterruptStatus; - BaseType_t xYieldCoreId; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -6783,28 +7018,44 @@ TickType_t uxTaskResetEventItemValue( void ) listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); } - xYieldCoreId = prvYieldForTask( pxTCB, pdFALSE, pdFALSE ); - - if( taskVALID_CORE_ID( xYieldCoreId ) ) + #if ( configNUM_CORES == 1 ) { - if( pxHigherPriorityTaskWoken != NULL ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { - *pxHigherPriorityTaskWoken = pdTRUE; - } + /* The notified task has a priority above the currently + * executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } - /* Mark that a yield is pending in case the user is not - * using the "xHigherPriorityTaskWoken" parameter in an ISR - * safe FreeRTOS function. 
*/ - #if ( configUSE_PREEMPTION == 1 ) + /* Mark that a yield is pending in case the user is not + * using the "xHigherPriorityTaskWoken" parameter in an ISR + * safe FreeRTOS function. */ + xYieldPendings[ 0 ] = pdTRUE; + } + else { - xYieldPendings[ xYieldCoreId ] = pdTRUE; + mtCOVERAGE_TEST_MARKER(); } - #endif /* ( configUSE_PREEMPTION == 1 ) */ } - else + #else /* #if ( configNUM_CORES == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxTCB, pdFALSE ); + + if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) + { + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + } + } + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } + #endif /* #if ( configNUM_CORES == 1 ) */ } } portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); From 96205d74695a955fb57dcd38c9e7a1445f41516a Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Sun, 6 Nov 2022 20:58:43 +0000 Subject: [PATCH 113/164] Corresponding changes in FreeRTOS.h and task.h Signed-off-by: Gaurav Aggarwal --- include/FreeRTOS.h | 7 ++++--- include/task.h | 18 ++++++------------ 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 81d47d7c1fb..55eaf976cbe 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1383,17 +1383,18 @@ typedef struct xSTATIC_TCB xMPU_SETTINGS xDummy2; #endif #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) - UBaseType_t uxDummy25; + UBaseType_t uxDummy26; #endif StaticListItem_t xDummy3[ 2 ]; UBaseType_t uxDummy5; void * pxDummy6; #if ( configNUM_CORES > 1 ) - BaseType_t xDummy23[ 2 ]; + BaseType_t xDummy23; + UBaseType_t uxDummy24; #endif uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - BaseType_t xDummy24; + BaseType_t xDummy25; #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; diff --git a/include/task.h b/include/task.h index 71acb35847d..c031fcf66f9 100644 --- a/include/task.h +++ b/include/task.h @@ -259,7 +259,7 @@ typedef enum #define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) #define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) -/* Check if core value is valid */ +/* Checks if core ID is valid. */ #define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ) ) /*----------------------------------------------------------- @@ -3211,17 +3211,11 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, * Sets the pointer to the current TCB to the TCB of the highest priority task * that is ready to run. */ -portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; - -/* - * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY - * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS - * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. - * - * Sets the pointer to the current TCB to the TCB of the highest priority task - * that is ready to run for core. - */ -portDONT_DISCARD void vTaskSwitchContextForCore( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; +#if ( configNUM_CORES == 1 ) + portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; +#else + portDONT_DISCARD void vTaskSwitchContext( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; +#endif /* * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. 
THEY ARE USED BY From ea4048ee2a70f307ab4e8e541a3c5bc07e4db4b9 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 7 Nov 2022 11:39:30 +0800 Subject: [PATCH 114/164] Fix the single core compilation * vTaskSwtichContextForCore rename vTaskSwitchContext * vTaskYieldWithinAPI for single core * pxCurrentTCBs for single core in xTaskIncrementTick --- event_groups.c | 20 ++++++++++-- portable/ThirdParty/GCC/RP2040/port.c | 4 +-- queue.c | 46 ++++++++++++++++++++++++--- tasks.c | 23 +++++++++++--- timers.c | 10 +++++- 5 files changed, 89 insertions(+), 14 deletions(-) diff --git a/event_groups.c b/event_groups.c index e68561f3ae3..4d40f5aec83 100644 --- a/event_groups.c +++ b/event_groups.c @@ -258,7 +258,15 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUM_CORES == 1 ) */ } else { @@ -410,7 +418,15 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUM_CORES == 1 ) */ } else { diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index 251e6159ec8..8585833587b 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -344,7 +344,7 @@ void vPortStartFirstTask( void ) * functionality by defining configTASK_RETURN_ADDRESS. Call * vTaskSwitchContext() so link time optimisation does not remove the * symbol. */ - vTaskSwitchContextForCore( portGET_CORE_ID() ); + vTaskSwitchContext( portGET_CORE_ID() ); prvTaskExitError(); /* Should not get here! */ @@ -667,7 +667,7 @@ void xPortPendSVHandler( void ) #endif /* portRUNNING_ON_BOTH_CORES */ " push {r3, r14} \n" " cpsid i \n" - " bl vTaskSwitchContextForCore \n" + " bl vTaskSwitchContext \n" " cpsie i \n" " pop {r2, r3} \n"/* lr goes in r3. r2 now holds tcb pointer. */ " \n" diff --git a/queue.c b/queue.c index 40b27849b02..cc3e1f574a9 100644 --- a/queue.c +++ b/queue.c @@ -89,7 +89,11 @@ typedef struct SemaphoreData * performed just because a higher priority task has been woken. */ #define queueYIELD_IF_USING_PREEMPTION() #else - #define queueYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() + #if ( configNUM_CORES == 1 ) + #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #else /* #if ( configNUM_CORES == 1 ) */ + #define queueYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() + #endif /* #if ( configNUM_CORES == 1 ) */ #endif /* @@ -1021,7 +1025,15 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * is also a higher priority task in the pending ready list. 
*/ if( xTaskResumeAll() == pdFALSE ) { - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUM_CORES == 1 ) */ } } else @@ -1482,7 +1494,15 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUM_CORES == 1 ) */ } else { @@ -1674,7 +1694,15 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUM_CORES == 1 ) */ } else { @@ -1852,7 +1880,15 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUM_CORES == 1 ) */ } else { diff --git a/tasks.c b/tasks.c index 1e7b2c6d7f6..ef172f4df1a 100644 --- a/tasks.c +++ b/tasks.c @@ -4124,17 +4124,32 @@ BaseType_t xTaskIncrementTick( void ) * writer has not explicitly turned time slicing off. */ #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { - for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) + #if ( configNUM_CORES == 1 ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { - xYieldRequiredForCore[ x ] = pdTRUE; + xSwitchRequired = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } + #else /* #if ( configNUM_CORES == 1 ) */ + { + for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xYieldRequiredForCore[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #endif /* #if ( configNUM_CORES == 1 ) */ } #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ @@ -4432,7 +4447,7 @@ BaseType_t xTaskIncrementTick( void ) portGET_TASK_LOCK(); /* Must always acquire the task lock first. */ portGET_ISR_LOCK(); { - /* vTaskSwitchContextForCore() must never be called from within a critical section. + /* vTaskSwitchContext() must never be called from within a critical section. * This is not necessarily true for single core FreeRTOS, but it is for this SMP port. */ #if ( portCRITICAL_NESTING_IN_TCB == 1 ) configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); diff --git a/timers.c b/timers.c index 058631175a1..ceff90931a3 100644 --- a/timers.c +++ b/timers.c @@ -688,7 +688,15 @@ * block time to expire. If a command arrived between the * critical section being exited and this yield then the yield * will not cause the task to block. 
*/ - vTaskYieldWithinAPI(); + #if ( configNUM_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUM_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUM_CORES == 1 ) */ } else { From b9e588c73ac7c2d60b3e5d8713384cda1cc52eaa Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 7 Nov 2022 12:12:06 +0800 Subject: [PATCH 115/164] Fix compilation warning --- include/task.h | 5 +++++ tasks.c | 15 +++++---------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/task.h b/include/task.h index c031fcf66f9..e31039e7457 100644 --- a/include/task.h +++ b/include/task.h @@ -3228,6 +3228,11 @@ TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION; */ TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION; +/* + * Return the handle of the task running on specified core. + */ +TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) PRIVILEGED_FUNCTION; + /* * Shortcut used by the queue implementation to prevent unnecessary call to * taskYIELD(); diff --git a/tasks.c b/tasks.c index ef172f4df1a..21ca33f347f 100644 --- a/tasks.c +++ b/tasks.c @@ -1994,12 +1994,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) vTaskSuspendAll(); { - configASSERT( uxSchedulerSuspended == 1 ); - /* Minor optimisation. The tick count cannot change in this * block. */ const TickType_t xConstTickCount = xTickCount; + configASSERT( uxSchedulerSuspended == 1 ); + /* Generate the tick time at which the task wants to wake. */ xTimeToWake = *pxPreviousWakeTime + xTimeIncrement; @@ -4916,6 +4916,8 @@ void vTaskMissedYield( void ) #if ( configNUM_CORES > 1 ) static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) { + ( void ) pvParameters; + taskYIELD(); for( ; ; ) @@ -5661,14 +5663,7 @@ static void prvResetNextTaskUnblockTime( void ) TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) { - TaskHandle_t xReturn = NULL; - - if( taskVALID_CORE_ID( xCoreID ) != pdFALSE ) - { - xReturn = pxCurrentTCBs[ xCoreID ]; - } - - return xReturn; + return pxCurrentTCBs[ xCoreID ]; } #endif /* #if ( configNUM_CORES == 1 ) */ From d2064c11fd8883ce8f40145be859280a0a2bf09d Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 7 Nov 2022 12:25:52 +0800 Subject: [PATCH 116/164] Update xTaskGetCurrentTaskHandleCPU API * Use BaseType_t instead of UBaseType_t --- include/task.h | 2 +- tasks.c | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/include/task.h b/include/task.h index e31039e7457..4390fb894ee 100644 --- a/include/task.h +++ b/include/task.h @@ -3231,7 +3231,7 @@ TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION; /* * Return the handle of the task running on specified core. 
*/ -TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) PRIVILEGED_FUNCTION; +TaskHandle_t xTaskGetCurrentTaskHandleCPU( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; /* * Shortcut used by the queue implementation to prevent unnecessary call to diff --git a/tasks.c b/tasks.c index 21ca33f347f..49797d45898 100644 --- a/tasks.c +++ b/tasks.c @@ -5661,9 +5661,16 @@ static void prvResetNextTaskUnblockTime( void ) return xReturn; } - TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) + TaskHandle_t xTaskGetCurrentTaskHandleCPU( BaseType_t xCoreID ) { - return pxCurrentTCBs[ xCoreID ]; + TaskHandle_t xReturn = NULL; + + if( taskVALID_CORE_ID( xCoreID ) != pdFALSE ) + { + xReturn = pxCurrentTCBs[ xCoreID ]; + } + + return xReturn; } #endif /* #if ( configNUM_CORES == 1 ) */ From de64fa7be2388f247d4291f7cd8fc7d7b0d51283 Mon Sep 17 00:00:00 2001 From: Gaurav Aggarwal Date: Mon, 7 Nov 2022 09:04:43 +0000 Subject: [PATCH 117/164] Make the list traverse loop more readable Signed-off-by: Gaurav Aggarwal --- tasks.c | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/tasks.c b/tasks.c index 49797d45898..db6fc6f2628 100644 --- a/tasks.c +++ b/tasks.c @@ -893,30 +893,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) { List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] ); - ListItem_t * pxLastTaskItem = pxReadyList->pxIndex->pxPrevious; - ListItem_t * pxTaskItem = pxLastTaskItem; - - if( ( void * ) pxLastTaskItem == ( void * ) &( pxReadyList->xListEnd ) ) - { - pxLastTaskItem = pxLastTaskItem->pxPrevious; - } + const ListItem_t * pxEndMarker = listGET_END_MARKER( pxReadyList ); + ListItem_t * pxIterator; /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority * must not be decremented any further. */ xDecrementTopPriority = pdFALSE; - do + for( pxIterator = listGET_HEAD_ENTRY( pxReadyList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) ) { - TCB_t * pxTCB; - - pxTaskItem = pxTaskItem->pxNext; - - if( ( void * ) pxTaskItem == ( void * ) &( pxReadyList->xListEnd ) ) - { - pxTaskItem = pxTaskItem->pxNext; - } - - pxTCB = pxTaskItem->pvOwner; + TCB_t * pxTCB = listGET_LIST_ITEM_OWNER( pxIterator ); #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) { @@ -972,11 +958,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; { /* Once a task has been selected to run on this core, * move it to the end of the ready task list. 
*/ - uxListRemove( pxTaskItem ); - vListInsertEnd( pxReadyList, pxTaskItem ); + uxListRemove( pxIterator ); + vListInsertEnd( pxReadyList, pxIterator ); break; } - } while( pxTaskItem != pxLastTaskItem ); + } } else { From 3df1ad7da8f3eeb2b96fa9ee05985ffcae6eddb4 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Tue, 8 Nov 2022 16:19:35 +0800 Subject: [PATCH 118/164] Remove unnecessary loop in xTaskIncrementTick for single core --- tasks.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/tasks.c b/tasks.c index db6fc6f2628..4db0e073394 100644 --- a/tasks.c +++ b/tasks.c @@ -3978,10 +3978,10 @@ BaseType_t xTaskIncrementTick( void ) TickType_t xItemValue; BaseType_t xSwitchRequired = pdFALSE; - #if ( configUSE_PREEMPTION == 1 ) + #if ( configUSE_PREEMPTION == 1 ) && ( configNUM_CORES > 1 ) UBaseType_t x; BaseType_t xYieldRequiredForCore[ configNUM_CORES ] = { pdFALSE }; - #endif /* #if ( configUSE_PREEMPTION == 1 ) */ + #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUM_CORES > 1 ) */ #if ( configNUM_CORES > 1 ) taskENTER_CRITICAL(); @@ -4156,22 +4156,10 @@ BaseType_t xTaskIncrementTick( void ) #if ( configUSE_PREEMPTION == 1 ) { - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) - { - if( xYieldPendings[ x ] != pdFALSE ) - { - xYieldRequiredForCore[ x ] = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #if ( configNUM_CORES == 1 ) { /* For single core the core ID is always 0. */ - if( xYieldRequiredForCore[ 0 ] != pdFALSE ) + if( xYieldPendings[ 0 ] != pdFALSE ) { xSwitchRequired = pdTRUE; } @@ -4191,7 +4179,7 @@ BaseType_t xTaskIncrementTick( void ) if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) #endif { - if( xYieldRequiredForCore[ x ] != pdFALSE ) + if( ( xYieldRequiredForCore[ x ] != pdFALSE ) || ( xYieldPendings[ x ] != pdFALSE ) ) { if( x == ( UBaseType_t ) xCoreID ) { From ab074f54ee43e1fc662875f3cb960936e34924c6 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Mon, 7 Nov 2022 22:04:52 +0800 Subject: [PATCH 119/164] Update uxSchedulerSuspended with ISR lock in prvCheckForRunStateChange --- tasks.c | 395 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 202 insertions(+), 193 deletions(-) diff --git a/tasks.c b/tasks.c index 4db0e073394..4f25f344be8 100644 --- a/tasks.c +++ b/tasks.c @@ -137,22 +137,22 @@ /*-----------------------------------------------------------*/ -#define taskSELECT_HIGHEST_PRIORITY_TASK() \ -{ \ - UBaseType_t uxTopPriority = uxTopReadyPriority; \ - \ - /* Find the highest priority queue that contains ready tasks. */ \ - while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ - { \ - configASSERT( uxTopPriority ); \ - --uxTopPriority; \ - } \ - \ - /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ - * the same priority get an equal share of the processor time. */ \ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - uxTopReadyPriority = uxTopPriority; \ -} /* taskSELECT_HIGHEST_PRIORITY_TASK */ + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority = uxTopReadyPriority; \ + \ + /* Find the highest priority queue that contains ready tasks. */ \ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ + { \ + configASSERT( uxTopPriority ); \ + --uxTopPriority; \ + } \ + \ + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ + * the same priority get an equal share of the processor time. 
*/ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK */ /*-----------------------------------------------------------*/ @@ -374,7 +374,7 @@ typedef tskTCB TCB_t; #if ( configNUM_CORES == 1 ) portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; #else - portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; +portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; #define pxCurrentTCB xTaskGetCurrentTaskHandle() #endif @@ -460,33 +460,37 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t static BaseType_t prvCreateIdleTasks( void ); #if ( configNUM_CORES > 1 ) - /* - * Checks to see if another task moved the current task out of the ready - * list while it was waiting to enter a critical section and yields, if so. - */ + +/* + * Checks to see if another task moved the current task out of the ready + * list while it was waiting to enter a critical section and yields, if so. + */ static void prvCheckForRunStateChange( void ); #endif /* #if ( configNUM_CORES > 1 ) */ #if ( configNUM_CORES > 1 ) - /* - * Yields the given core. - */ + +/* + * Yields the given core. + */ static void prvYieldCore( BaseType_t xCoreID ); #endif /* #if ( configNUM_CORES > 1 ) */ #if ( configNUM_CORES > 1 ) - /* - * Yields a core, or cores if multiple priorities are not allowed to run - * simultaneously, to allow the task pxTCB to run. - */ + +/* + * Yields a core, or cores if multiple priorities are not allowed to run + * simultaneously, to allow the task pxTCB to run. + */ static void prvYieldForTask( TCB_t * pxTCB, const BaseType_t xPreemptEqualPriority ); #endif /* #if ( configNUM_CORES > 1 ) */ #if ( configNUM_CORES > 1 ) - /* - * Selects the highest priority available task for the given core. - */ + +/* + * Selects the highest priority available task for the given core. + */ static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ); #endif /* #if ( configNUM_CORES > 1 ) */ @@ -677,13 +681,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) { /* We are only here if we just entered a critical section - * or if we just suspended the scheduler, and another task - * has requested that we yield. - * - * This is slightly complicated since we need to save and restore - * the suspension and critical nesting counts, as well as release - * and reacquire the correct locks. And then, do it all over again - * if our state changed again during the reacquisition. */ + * or if we just suspended the scheduler, and another task + * has requested that we yield. + * + * This is slightly complicated since we need to save and restore + * the suspension and critical nesting counts, as well as release + * and reacquire the correct locks. And then, do it all over again + * if our state changed again during the reacquisition. 
*/ uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; uxPrevSchedulerSuspended = uxSchedulerSuspended; @@ -695,15 +699,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( uxPrevCriticalNesting > 0U ) { pxThisTCB->uxCriticalNesting = 0U; - portRELEASE_ISR_LOCK(); - portRELEASE_TASK_LOCK(); } else { + portGET_ISR_LOCK(); uxSchedulerSuspended = 0U; - portRELEASE_TASK_LOCK(); } + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + portMEMORY_BARRIER(); configASSERT( pxThisTCB->xTaskRunState == taskTASK_YIELDING ); @@ -773,11 +778,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /* This must be called from a critical section. */ configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); - #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) - /* No task should yield for this one if it is a lower priority - * than priority level of currently ready tasks. */ - if( pxTCB->uxPriority >= uxTopReadyPriority ) - #endif + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + + /* No task should yield for this one if it is a lower priority + * than priority level of currently ready tasks. */ + if( pxTCB->uxPriority >= uxTopReadyPriority ) + #endif { xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority; @@ -1657,168 +1663,168 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #if ( configNUM_CORES == 1 ) -static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) -{ - /* Ensure interrupts don't access the task lists while the lists are being - * updated. */ - taskENTER_CRITICAL(); + static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { - uxCurrentNumberOfTasks++; - - if( pxCurrentTCB == NULL ) + /* Ensure interrupts don't access the task lists while the lists are being + * updated. */ + taskENTER_CRITICAL(); { - /* There are no other tasks, or all the other tasks are in - * the suspended state - make this the current task. */ - pxCurrentTCB = pxNewTCB; + uxCurrentNumberOfTasks++; - if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + if( pxCurrentTCB == NULL ) { - /* This is the first task to be created so do the preliminary - * initialisation required. We will not recover if this call - * fails, but we will report the failure. */ - prvInitialiseTaskLists(); + /* There are no other tasks, or all the other tasks are in + * the suspended state - make this the current task. */ + pxCurrentTCB = pxNewTCB; + + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + * initialisation required. We will not recover if this call + * fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* If the scheduler is not already running, make this task the - * current task if it is the highest priority task to be created - * so far. */ - if( xSchedulerRunning == pdFALSE ) - { - if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + /* If the scheduler is not already running, make this task the + * current task if it is the highest priority task to be created + * so far. */ + if( xSchedulerRunning == pdFALSE ) { - pxCurrentTCB = pxNewTCB; + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + { + pxCurrentTCB = pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { mtCOVERAGE_TEST_MARKER(); } } - else + + uxTaskNumber++; + + #if ( configUSE_TRACE_FACILITY == 1 ) { - mtCOVERAGE_TEST_MARKER(); + /* Add a counter into the TCB for tracing only. 
*/ + pxNewTCB->uxTCBNumber = uxTaskNumber; } - } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); - uxTaskNumber++; + prvAddTaskToReadyList( pxNewTCB ); - #if ( configUSE_TRACE_FACILITY == 1 ) - { - /* Add a counter into the TCB for tracing only. */ - pxNewTCB->uxTCBNumber = uxTaskNumber; + portSETUP_TCB( pxNewTCB ); } - #endif /* configUSE_TRACE_FACILITY */ - traceTASK_CREATE( pxNewTCB ); - - prvAddTaskToReadyList( pxNewTCB ); - - portSETUP_TCB( pxNewTCB ); - } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL(); - if( xSchedulerRunning != pdFALSE ) - { - /* If the created task is of a higher priority than the current task - * then it should run now. */ - if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + if( xSchedulerRunning != pdFALSE ) { - taskYIELD_IF_USING_PREEMPTION(); + /* If the created task is of a higher priority than the current task + * then it should run now. */ + if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } -} #else /* #if ( configNUM_CORES == 1 ) */ -static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) -{ - /* Ensure interrupts don't access the task lists while the lists are being - * updated. */ - taskENTER_CRITICAL(); + static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { - uxCurrentNumberOfTasks++; - - if( xSchedulerRunning == pdFALSE ) + /* Ensure interrupts don't access the task lists while the lists are being + * updated. */ + taskENTER_CRITICAL(); { - if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) - { - /* This is the first task to be created so do the preliminary - * initialisation required. We will not recover if this call - * fails, but we will report the failure. */ - prvInitialiseTaskLists(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + uxCurrentNumberOfTasks++; - if( ( pxNewTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + if( xSchedulerRunning == pdFALSE ) { - BaseType_t xCoreID; + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + * initialisation required. We will not recover if this call + * fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - /* Check if a core is free. */ - for( xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ ) + if( ( pxNewTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) { - if( pxCurrentTCBs[ xCoreID ] == NULL ) - { - pxNewTCB->xTaskRunState = xCoreID; - pxCurrentTCBs[ xCoreID ] = pxNewTCB; - break; - } - else + BaseType_t xCoreID; + + /* Check if a core is free. */ + for( xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ ) { - mtCOVERAGE_TEST_MARKER(); + if( pxCurrentTCBs[ xCoreID ] == NULL ) + { + pxNewTCB->xTaskRunState = xCoreID; + pxCurrentTCBs[ xCoreID ] = pxNewTCB; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - uxTaskNumber++; + uxTaskNumber++; - #if ( configUSE_TRACE_FACILITY == 1 ) - { - /* Add a counter into the TCB for tracing only. */ - pxNewTCB->uxTCBNumber = uxTaskNumber; - } - #endif /* configUSE_TRACE_FACILITY */ - traceTASK_CREATE( pxNewTCB ); + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. 
*/ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); - prvAddTaskToReadyList( pxNewTCB ); + prvAddTaskToReadyList( pxNewTCB ); - portSETUP_TCB( pxNewTCB ); + portSETUP_TCB( pxNewTCB ); - if( xSchedulerRunning != pdFALSE ) - { - /* If the created task is of a higher priority than another - * currently running task and preemption is on then it should - * run now. */ - #if ( configUSE_PREEMPTION == 1 ) - prvYieldForTask( pxNewTCB, pdFALSE ); - #endif - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than another + * currently running task and preemption is on then it should + * run now. */ + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxNewTCB, pdFALSE ); + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } + taskEXIT_CRITICAL(); } - taskEXIT_CRITICAL(); -} #endif /* #if ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -1864,11 +1870,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* If the task is running (or yielding), we must add it to the * termination list so that an idle task can delete it when it is * no longer running. */ - #if ( configNUM_CORES == 1 ) - if( pxTCB == pxCurrentTCB ) - #else - if( pxTCB->xTaskRunState != taskTASK_NOT_RUNNING ) - #endif + #if ( configNUM_CORES == 1 ) + if( pxTCB == pxCurrentTCB ) + #else + if( pxTCB->xTaskRunState != taskTASK_NOT_RUNNING ) + #endif { /* A running task is being deleted. This cannot complete within the * task itself, as a context switch to another task is required. @@ -2298,9 +2304,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) TCB_t * pxTCB; UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; BaseType_t xYieldRequired = pdFALSE; - #if ( configNUM_CORES > 1 ) - BaseType_t xYieldForTask = pdFALSE; - #endif + + #if ( configNUM_CORES > 1 ) + BaseType_t xYieldForTask = pdFALSE; + #endif configASSERT( uxNewPriority < configMAX_PRIORITIES ); @@ -2609,6 +2616,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskSuspend( TaskHandle_t xTaskToSuspend ) { TCB_t * pxTCB; + #if ( configNUM_CORES > 1 ) TaskRunning_t xTaskRunningOnCore; #endif @@ -2875,8 +2883,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) { /* This yield may not cause the task just resumed to run, - * but will leave the lists in the correct state for the - * next yield. */ + * but will leave the lists in the correct state for the + * next yield. */ taskYIELD_IF_USING_PREEMPTION(); } else @@ -2957,8 +2965,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) xYieldRequired = pdTRUE; /* Mark that a yield is pending in case the user is not - * using the return value to initiate a context switch - * from the ISR using portYIELD_FROM_ISR. */ + * using the return value to initiate a context switch + * from the ISR using portYIELD_FROM_ISR. */ xYieldPendings[ 0 ] = pdTRUE; } else @@ -3933,13 +3941,13 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) { #if ( configNUM_CORES == 1 ) { - /* Preemption is on, but a context switch should only be - * performed if the unblocked task has a priority that is - * higher than the currently executing task. */ + /* Preemption is on, but a context switch should only be + * performed if the unblocked task has a priority that is + * higher than the currently executing task. 
*/ if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { /* Pend the yield to be performed when the scheduler - * is unsuspended. */ + * is unsuspended. */ xYieldPendings[ 0 ] = pdTRUE; } else @@ -4079,7 +4087,7 @@ BaseType_t xTaskIncrementTick( void ) * context switch if preemption is turned off. */ #if ( configUSE_PREEMPTION == 1 ) { - #if( configNUM_CORES == 1 ) + #if ( configNUM_CORES == 1 ) { /* Preemption is on, but a context switch should * only be performed if the unblocked task has a @@ -5256,7 +5264,7 @@ static void prvCheckTasksWaitingTermination( void ) { taskENTER_CRITICAL(); { - #if( configNUM_CORES == 1 ) + #if ( configNUM_CORES == 1 ) { pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); @@ -5663,9 +5671,9 @@ static void prvResetNextTaskUnblockTime( void ) } else { - #if ( configNUM_CORES > 1 ) - taskENTER_CRITICAL(); - #endif + #if ( configNUM_CORES > 1 ) + taskENTER_CRITICAL(); + #endif { if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { @@ -5676,9 +5684,9 @@ static void prvResetNextTaskUnblockTime( void ) xReturn = taskSCHEDULER_SUSPENDED; } } - #if ( configNUM_CORES > 1 ) - taskEXIT_CRITICAL(); - #endif + #if ( configNUM_CORES > 1 ) + taskEXIT_CRITICAL(); + #endif } return xReturn; @@ -5964,10 +5972,11 @@ static void prvResetNextTaskUnblockTime( void ) /*-----------------------------------------------------------*/ #if ( configNUM_CORES > 1 ) - /* If not in a critical section then yield immediately. - * Otherwise set xYieldPendings to true to wait to - * yield until exiting the critical section. - */ + +/* If not in a critical section then yield immediately. + * Otherwise set xYieldPendings to true to wait to + * yield until exiting the critical section. + */ void vTaskYieldWithinAPI( void ) { if( pxCurrentTCB->uxCriticalNesting == 0U ) @@ -6903,15 +6912,15 @@ TickType_t uxTaskResetEventItemValue( void ) if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { /* The notified task has a priority above the currently - * executing task so a yield is required. */ + * executing task so a yield is required. */ if( pxHigherPriorityTaskWoken != NULL ) { *pxHigherPriorityTaskWoken = pdTRUE; } /* Mark that a yield is pending in case the user is not - * using the "xHigherPriorityTaskWoken" parameter to an ISR - * safe FreeRTOS function. */ + * using the "xHigherPriorityTaskWoken" parameter to an ISR + * safe FreeRTOS function. 
*/ xYieldPendings[ 0 ] = pdTRUE; } else @@ -6933,7 +6942,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - #endif + #endif /* if ( configUSE_PREEMPTION == 1 ) */ } #endif /* #if ( configNUM_CORES == 1 ) */ } From 3f9366f82139adeec7228b4e56c9fe2a2247ec5a Mon Sep 17 00:00:00 2001 From: Laukik Hase Date: Wed, 12 Oct 2022 02:57:32 +0530 Subject: [PATCH 120/164] Updated ESP32 port-layer to ESP-IDF `v4.4.2` (#572) * Xtensa_ESP32: Added esp-idf v4.4.2 specific changes * Xtensa_ESP32: Updated SPDX license identifiers --- .../include/FreeRTOSConfig_arch.h | 134 +++++++ .../GCC/Xtensa_ESP32/include/port_systick.h | 20 + .../GCC/Xtensa_ESP32/include/portmacro.h | 159 ++++---- .../GCC/Xtensa_ESP32/include/xtensa_api.h | 111 +----- .../GCC/Xtensa_ESP32/include/xtensa_context.h | 371 +----------------- .../GCC/Xtensa_ESP32/include/xtensa_rtos.h | 6 +- portable/ThirdParty/GCC/Xtensa_ESP32/port.c | 251 +++++++----- .../ThirdParty/GCC/Xtensa_ESP32/port_common.c | 159 ++++++++ .../GCC/Xtensa_ESP32/port_systick.c | 174 ++++++++ .../ThirdParty/GCC/Xtensa_ESP32/portasm.S | 14 +- .../ThirdParty/GCC/Xtensa_ESP32/xtensa_init.c | 33 +- .../ThirdParty/GCC/Xtensa_ESP32/xtensa_intr.c | 190 --------- .../GCC/Xtensa_ESP32/xtensa_intr_asm.S | 232 ----------- .../GCC/Xtensa_ESP32/xtensa_vector_defaults.S | 76 +++- .../GCC/Xtensa_ESP32/xtensa_vectors.S | 22 +- 15 files changed, 855 insertions(+), 1097 deletions(-) create mode 100644 portable/ThirdParty/GCC/Xtensa_ESP32/include/FreeRTOSConfig_arch.h create mode 100644 portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h create mode 100644 portable/ThirdParty/GCC/Xtensa_ESP32/port_common.c create mode 100644 portable/ThirdParty/GCC/Xtensa_ESP32/port_systick.c delete mode 100644 portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_intr.c delete mode 100644 portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_intr_asm.S diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/FreeRTOSConfig_arch.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/FreeRTOSConfig_arch.h new file mode 100644 index 00000000000..cbc46d5fe16 --- /dev/null +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/FreeRTOSConfig_arch.h @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: 2022 Amazon.com, Inc. or its affiliates + * + * SPDX-License-Identifier: MIT + * + * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD + */ +/* + * FreeRTOS Kernel V10.4.3 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. If you wish to use our Amazon + * FreeRTOS name, please do so in a fair use way that does not cause confusion. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + * 1 tab == 4 spaces! + */ + +#ifndef FREERTOS_CONFIG_XTENSA_H +#define FREERTOS_CONFIG_XTENSA_H + +#include "sdkconfig.h" + +/* enable use of optimized task selection by the scheduler */ +#if defined (CONFIG_FREERTOS_OPTIMIZED_SCHEDULER) && !defined(configUSE_PORT_OPTIMISED_TASK_SELECTION) +#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 +#endif + +#define XT_USE_THREAD_SAFE_CLIB 0 +#undef XT_USE_SWPRI + +#if CONFIG_FREERTOS_CORETIMER_0 +#define XT_TIMER_INDEX 0 +#elif CONFIG_FREERTOS_CORETIMER_1 +#define XT_TIMER_INDEX 1 +#endif + +#ifndef __ASSEMBLER__ +/** + * This function is defined to provide a deprecation warning whenever + * XT_CLOCK_FREQ macro is used. + * Update the code to use esp_clk_cpu_freq function instead. + * @return current CPU clock frequency, in Hz + */ +int xt_clock_freq(void) __attribute__((deprecated)); + +#define XT_CLOCK_FREQ (xt_clock_freq()) + +#endif // __ASSEMBLER__ + +/* Required for configuration-dependent settings */ +#include + +/* configASSERT behaviour */ +#ifndef __ASSEMBLER__ +#include +#include "esp_rom_sys.h" +#if CONFIG_IDF_TARGET_ESP32 +#include "esp32/rom/ets_sys.h" // will be removed in idf v5.0 +#elif CONFIG_IDF_TARGET_ESP32S2 +#include "esp32s2/rom/ets_sys.h" +#elif CONFIG_IDF_TARGET_ESP32S3 +#include "esp32s3/rom/ets_sys.h" +#endif +#endif // __ASSEMBLER__ + +// If CONFIG_FREERTOS_ASSERT_DISABLE is set then configASSERT is defined empty later in FreeRTOS.h and the macro +// configASSERT_DEFINED remains unset (meaning some warnings are avoided) +#ifdef configASSERT +#undef configASSERT +#if defined(CONFIG_FREERTOS_ASSERT_FAIL_PRINT_CONTINUE) +#define configASSERT(a) if (unlikely(!(a))) { \ + esp_rom_printf("%s:%d (%s)- assert failed!\n", __FILE__, __LINE__, \ + __FUNCTION__); \ + } +#elif defined(CONFIG_FREERTOS_ASSERT_FAIL_ABORT) +#define configASSERT(a) assert(a) +#endif +#endif + +#if CONFIG_FREERTOS_ASSERT_ON_UNTESTED_FUNCTION +#define UNTESTED_FUNCTION() { esp_rom_printf("Untested FreeRTOS function %s\r\n", __FUNCTION__); configASSERT(false); } while(0) +#else +#define UNTESTED_FUNCTION() +#endif + +#define configXT_BOARD 1 /* Board mode */ +#define configXT_SIMULATOR 0 + +/* The maximum interrupt priority from which FreeRTOS.org API functions can + be called. Only API functions that end in ...FromISR() can be used within + interrupts. */ +#define configMAX_SYSCALL_INTERRUPT_PRIORITY XCHAL_EXCM_LEVEL + +/* Stack alignment, architecture specifc. Must be a power of two. */ +#define configSTACK_ALIGNMENT 16 + + +/* The Xtensa port uses a separate interrupt stack. Adjust the stack size + * to suit the needs of your specific application. + * Size needs to be aligned to the stack increment, since the location of + * the stack for the 2nd CPU will be calculated using configISR_STACK_SIZE. 
+ */ +#ifndef configISR_STACK_SIZE +#define configISR_STACK_SIZE ((CONFIG_FREERTOS_ISR_STACKSIZE + configSTACK_ALIGNMENT - 1) & (~(configSTACK_ALIGNMENT - 1))) +#endif + +#ifndef __ASSEMBLER__ +#if CONFIG_APPTRACE_SV_ENABLE +extern uint32_t port_switch_flag[]; +#define os_task_switch_is_pended(_cpu_) (port_switch_flag[_cpu_]) +#else +#define os_task_switch_is_pended(_cpu_) (false) +#endif +#endif + +#endif // FREERTOS_CONFIG_XTENSA_H diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h new file mode 100644 index 00000000000..18b47f3154e --- /dev/null +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h @@ -0,0 +1,20 @@ +/* + * SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Set up the SysTick interrupt + */ +void vPortSetupTimer(void); + +#ifdef __cplusplus +} +#endif diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h index 7a3c1acd96d..392196c8e02 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h @@ -74,12 +74,20 @@ #include #include /* required for XSHAL_CLIB */ #include + #include "soc/spinlock.h" #include "esp_timer.h" /* required for FreeRTOS run time stats */ #include "esp_system.h" #include "esp_idf_version.h" + #include "esp_heap_caps.h" + /* TODO: Resolve build warnings generated due to this header inclusion */ + #include "hal/cpu_hal.h" - #include + /* TODO: These includes are not directly used in this file. They are kept into to prevent a breaking change. Remove these. */ + #include + #include + + #include "soc/cpu.h" #include "soc/soc_memory_layout.h" #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0)) #include "soc/compare_set.h" @@ -127,54 +135,12 @@ #include "esp_attr.h" /* "mux" data structure (spinlock) */ - typedef struct - { - /* owner field values: - * 0 - Uninitialized (invalid) - * portMUX_FREE_VAL - Mux is free, can be locked by either CPU - * CORE_ID_REGVAL_PRO / CORE_ID_REGVAL_APP - Mux is locked to the particular core - * - * Any value other than portMUX_FREE_VAL, CORE_ID_REGVAL_PRO, CORE_ID_REGVAL_APP indicates corruption - */ - uint32_t owner; - - /* count field: - * If mux is unlocked, count should be zero. - * If mux is locked, count is non-zero & represents the number of recursive locks on the mux. - */ - uint32_t count; - #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG - const char * lastLockedFn; - int lastLockedLine; - #endif - } portMUX_TYPE; - - #define portMUX_FREE_VAL 0xB33FFFFF - -/* Special constants for vPortCPUAcquireMutexTimeout() */ - #define portMUX_NO_TIMEOUT ( -1 ) /* When passed for 'timeout_cycles', spin forever if necessary */ - #define portMUX_TRY_LOCK 0 /* Try to acquire the spinlock a single time only */ - -/* Keep this in sync with the portMUX_TYPE struct definition please. 
*/ - #ifndef CONFIG_FREERTOS_PORTMUX_DEBUG - #define portMUX_INITIALIZER_UNLOCKED \ - { \ - .owner = portMUX_FREE_VAL, \ - .count = 0, \ - } - #else - #define portMUX_INITIALIZER_UNLOCKED \ - { \ - .owner = portMUX_FREE_VAL, \ - .count = 0, \ - .lastLockedFn = "(never locked)", \ - .lastLockedLine = -1 \ - } - #endif /* ifndef CONFIG_FREERTOS_PORTMUX_DEBUG */ - - - #define portASSERT_IF_IN_ISR() vPortAssertIfInISR() - void vPortAssertIfInISR(); + typedef spinlock_t portMUX_TYPE; /**< Spinlock type used by FreeRTOS critical sections */ + #define portMUX_INITIALIZER_UNLOCKED SPINLOCK_INITIALIZER /**< Spinlock initializer */ + #define portMUX_FREE_VAL SPINLOCK_FREE /**< Spinlock is free. [refactor-todo] check if this is still required */ + #define portMUX_NO_TIMEOUT SPINLOCK_WAIT_FOREVER /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */ + #define portMUX_TRY_LOCK SPINLOCK_NO_WAIT /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */ + #define portMUX_INITIALIZE(mux) spinlock_initialize(mux) /*< Initialize a spinlock to its unlocked state */ #define portCRITICAL_NESTING_IN_TCB 1 @@ -200,7 +166,7 @@ * This all assumes that interrupts are either entirely disabled or enabled. Interrupt priority levels * will break this scheme. * - * Remark: For the ESP32, portENTER_CRITICAL and portENTER_CRITICAL_ISR both alias vTaskEnterCritical, meaning + * Remark: For the ESP32, portENTER_CRITICAL and portENTER_CRITICAL_ISR both alias vPortEnterCritical, meaning * that either function can be called both from ISR as well as task context. This is not standard FreeRTOS * behaviour; please keep this in mind if you need any compatibility with other FreeRTOS implementations. */ @@ -255,6 +221,8 @@ } \ } while( 0 ) + #define portASSERT_IF_IN_ISR() vPortAssertIfInISR() + void vPortAssertIfInISR(void); /* Critical section management. NW-TODO: replace XTOS_SET_INTLEVEL with more efficient version, if any? */ /* These cannot be nested. They should be used with a lot of care and cannot be called from interrupt level. */ @@ -266,18 +234,22 @@ /* Cleaner solution allows nested interrupts disabling and restoring via local registers or stack. */ /* They can be called from interrupts too. */ /* WARNING: Only applies to current CPU. See notes above. 
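As a point of reference for the spinlock-backed mux macros introduced above, a minimal usage sketch follows. It is not part of the patch and assumes the ESP-IDF flavour of the critical-section macros, in which portENTER_CRITICAL()/portEXIT_CRITICAL() take the mux pointer as an argument:

    /* Hypothetical example: guard a counter shared between both cores. */
    static portMUX_TYPE xCounterMux = portMUX_INITIALIZER_UNLOCKED;
    static uint32_t ulSharedCounter = 0;

    void vExampleIncrement( void )
    {
        portENTER_CRITICAL( &xCounterMux );   /* masks interrupts locally, spins if the other core holds the lock */
        ulSharedCounter++;
        portEXIT_CRITICAL( &xCounterMux );
    }

Because portENTER_CRITICAL and portENTER_CRITICAL_ISR alias the same routine in this port (see the remark above), the same pattern can be used from an ISR, subject to the caveats noted there.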
*/ - static inline unsigned portENTER_CRITICAL_NESTED() + static inline UBaseType_t __attribute__( ( always_inline ) ) xPortSetInterruptMaskFromISR( void ) { - unsigned state = XTOS_SET_INTLEVEL( XCHAL_EXCM_LEVEL ); - + UBaseType_t prev_int_level = XTOS_SET_INTLEVEL( XCHAL_EXCM_LEVEL ); portbenchmarkINTERRUPT_DISABLE(); - return state; + return prev_int_level; + } + + static inline void __attribute__( ( always_inline ) ) vPortClearInterruptMaskFromISR( UBaseType_t prev_level ) + { + portbenchmarkINTERRUPT_RESTORE( prev_level ); + XTOS_RESTORE_JUST_INTLEVEL( prev_level ); } - #define portEXIT_CRITICAL_NESTED( state ) do { portbenchmarkINTERRUPT_RESTORE( state ); XTOS_RESTORE_JUST_INTLEVEL( state ); } while( 0 ) /* These FreeRTOS versions are similar to the nested versions above */ - #define portSET_INTERRUPT_MASK_FROM_ISR() portENTER_CRITICAL_NESTED() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( state ) portEXIT_CRITICAL_NESTED( state ) + #define portSET_INTERRUPT_MASK_FROM_ISR() xPortSetInterruptMaskFromISR() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( prev_level ) vPortClearInterruptMaskFromISR( prev_level ) /*Because the ROM routines don't necessarily handle a stack in external RAM correctly, we force */ /*the stack memory to always be internal. */ @@ -352,7 +324,7 @@ #else static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set) { - #if defined(CONFIG_ESP32_SPIRAM_SUPPORT) + #if defined(CONFIG_SPIRAM) compare_and_set_extram(addr, compare, set); #endif } @@ -374,18 +346,47 @@ #ifdef CONFIG_FREERTOS_RUN_TIME_STATS_USING_ESP_TIMER /* Coarse resolution time (us) */ - #define portALT_GET_RUN_TIME_COUNTER_VALUE( x ) x = ( uint32_t ) esp_timer_get_time() + #define portALT_GET_RUN_TIME_COUNTER_VALUE( x ) do { x = ( uint32_t )esp_timer_get_time(); } while( 0 ) #endif /* Kernel utilities. */ void vPortYield( void ); + void vPortEvaluateYieldFromISR( int argc, ... ); void _frxt_setup_switch( void ); - #define portYIELD() vPortYield() - #define portYIELD_FROM_ISR() { traceISR_EXIT_TO_SCHEDULER(); _frxt_setup_switch(); } +/* Macro to count number of arguments of a __VA_ARGS__ used to support portYIELD_FROM_ISR with, + * or without arguments. The macro counts only 0 or 1 arguments. + * + * In the future, we want to switch to C++20. We also want to become compatible with clang. + * Hence, we provide two versions of the following macros which are using variadic arguments. + * The first one is using the GNU extension ##__VA_ARGS__. The second one is using the C++20 feature __VA_OPT__(,). + * This allows users to compile their code with standard C++20 enabled instead of the GNU extension. + * Below C++20, we haven't found any good alternative to using ##__VA_ARGS__. + */ + #if defined( __cplusplus ) && ( __cplusplus > 201703L ) + #define portGET_ARGUMENT_COUNT(...) portGET_ARGUMENT_COUNT_INNER( 0 __VA_OPT__(,) __VA_ARGS__, 1 , 0 ) + #else + #define portGET_ARGUMENT_COUNT(...) portGET_ARGUMENT_COUNT_INNER( 0, ##__VA_ARGS__, 1, 0 ) + #endif + #define portGET_ARGUMENT_COUNT_INNER( zero, one, count, ... 
) count - static inline uint32_t xPortGetCoreID(); + _Static_assert( portGET_ARGUMENT_COUNT() == 0, "portGET_ARGUMENT_COUNT() result does not match for 0 arguments" ); + _Static_assert( portGET_ARGUMENT_COUNT( 1 ) == 1, "portGET_ARGUMENT_COUNT() result does not match for 1 argument" ); + + #define portYIELD() vPortYield() + +/* The macro below could be used when passing a single argument, or without any argument, + * it was developed to support both usages of portYIELD inside of an ISR. Any other usage form + * might result in undesired behaviour + */ + #if defined( __cplusplus ) && ( __cplusplus > 201703L ) + #define portYIELD_FROM_ISR(...) vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT( __VA_ARGS__ ) __VA_OPT__( , ) __VA_ARGS__ ) + #else + #define portYIELD_FROM_ISR(...) vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT( __VA_ARGS__ ), ##__VA_ARGS__ ) + #endif + + static inline BaseType_t xPortGetCoreID(); /*-----------------------------------------------------------*/ @@ -428,10 +429,11 @@ #endif void vApplicationSleep( TickType_t xExpectedIdleTime ); - void vPortSetStackWatchpoint( void* pxStackStart ); #define portSUPPRESS_TICKS_AND_SLEEP( idleTime ) vApplicationSleep( idleTime ) + void _xt_coproc_release( volatile void * coproc_sa_base ); + /*-----------------------------------------------------------*/ #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0)) @@ -458,10 +460,6 @@ /*-----------------------------------------------------------*/ - - void _xt_coproc_release( volatile void * coproc_sa_base ); - - /* * Map to the memory management routines required for the port. * @@ -520,20 +518,31 @@ #endif /* Multi-core: get current core ID */ - static inline uint32_t IRAM_ATTR xPortGetCoreID() + static inline BaseType_t IRAM_ATTR xPortGetCoreID() { - int id; - - asm ( - "rsr.prid %0\n" - " extui %0,%0,13,1" - : "=r" ( id ) ); - return id; + return ( uint32_t )cpu_hal_get_core_id(); } /* Get tick rate per second */ uint32_t xPortGetTickRateHz( void ); + static inline bool IRAM_ATTR xPortCanYield(void) + { + uint32_t ps_reg = 0; + + //Get the current value of PS (processor status) register + RSR(PS, ps_reg); + + /* + * intlevel = (ps_reg & 0xf); + * excm = (ps_reg >> 4) & 0x1; + * CINTLEVEL is max(excm * EXCMLEVEL, INTLEVEL), where EXCMLEVEL is 3. + * However, just return true, only intlevel is zero. + */ + + return ((ps_reg & PS_INTLEVEL_MASK) == 0); + } + /* porttrace */ #if configUSE_TRACE_FACILITY_2 #include "porttrace.h" diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_api.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_api.h index aeade0453a7..aaa8ccd45ea 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_api.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_api.h @@ -27,113 +27,4 @@ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - -/****************************************************************************** -* Xtensa-specific API for RTOS ports. 
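Stepping back to the portGET_ARGUMENT_COUNT()/portYIELD_FROM_ISR() macros added to portmacro.h above, the two supported call forms expand as follows (a walkthrough of the existing macros, not new code):

    portYIELD_FROM_ISR()
        -> vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT() )
        -> vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT_INNER( 0, 1, 0 ) )    /* count == 0 */
        -> vPortEvaluateYieldFromISR( 0 )

    portYIELD_FROM_ISR( xHigherPriorityTaskWoken )
        -> vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT( xHigherPriorityTaskWoken ), xHigherPriorityTaskWoken )
        -> vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT_INNER( 0, xHigherPriorityTaskWoken, 1, 0 ), xHigherPriorityTaskWoken )    /* count == 1 */
        -> vPortEvaluateYieldFromISR( 1, xHigherPriorityTaskWoken )

This is exactly what the two _Static_asserts above verify, and it matches the argc handling in vPortEvaluateYieldFromISR() in port.c further down in this patch.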
-******************************************************************************/ - -#ifndef __XTENSA_API_H__ -#define __XTENSA_API_H__ - -#include - -#include "xtensa_context.h" - - -/* Typedef for C-callable interrupt handler function */ -typedef void (* xt_handler)( void * ); - -/* Typedef for C-callable exception handler function */ -typedef void (* xt_exc_handler)( XtExcFrame * ); - - -/* - * ------------------------------------------------------------------------------- - * Call this function to set a handler for the specified exception. The handler - * will be installed on the core that calls this function. - * - * n - Exception number (type) - * f - Handler function address, NULL to uninstall handler. - * - * The handler will be passed a pointer to the exception frame, which is created - * on the stack of the thread that caused the exception. - * - * If the handler returns, the thread context will be restored and the faulting - * instruction will be retried. Any values in the exception frame that are - * modified by the handler will be restored as part of the context. For details - * of the exception frame structure see xtensa_context.h. - * ------------------------------------------------------------------------------- - */ -extern xt_exc_handler xt_set_exception_handler( int n, - xt_exc_handler f ); - - -/* - * ------------------------------------------------------------------------------- - * Call this function to set a handler for the specified interrupt. The handler - * will be installed on the core that calls this function. - * - * n - Interrupt number. - * f - Handler function address, NULL to uninstall handler. - * arg - Argument to be passed to handler. - * ------------------------------------------------------------------------------- - */ -extern xt_handler xt_set_interrupt_handler( int n, - xt_handler f, - void * arg ); - - -/* - * ------------------------------------------------------------------------------- - * Call this function to enable the specified interrupts on the core that runs - * this code. - * - * mask - Bit mask of interrupts to be enabled. - * ------------------------------------------------------------------------------- - */ -extern void xt_ints_on( unsigned int mask ); - - -/* - * ------------------------------------------------------------------------------- - * Call this function to disable the specified interrupts on the core that runs - * this code. - * - * mask - Bit mask of interrupts to be disabled. - * ------------------------------------------------------------------------------- - */ -extern void xt_ints_off( unsigned int mask ); - - -/* - * ------------------------------------------------------------------------------- - * Call this function to set the specified (s/w) interrupt. - * ------------------------------------------------------------------------------- - */ -static inline void xt_set_intset( unsigned int arg ) -{ - xthal_set_intset( arg ); -} - - -/* - * ------------------------------------------------------------------------------- - * Call this function to clear the specified (s/w or edge-triggered) - * interrupt. - * ------------------------------------------------------------------------------- - */ -static inline void xt_set_intclear( unsigned int arg ) -{ - xthal_set_intclear( arg ); -} - -/* - * ------------------------------------------------------------------------------- - * Call this function to get handler's argument for the specified interrupt. - * - * n - Interrupt number. 
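Most of this header's body is deleted here, presumably in favour of the copy shipped with ESP-IDF, so the functions documented above remain the ones a port or application calls. A hedged usage sketch under that assumption, where SW_INT_NUM stands in for a valid software-interrupt number on the target (it is not defined by this header):

    static void prvSwIntHandler( void * pvArg )
    {
        /* Runs on the core that installed it. */
    }

    void vExampleInstallSwInt( void )
    {
        xt_set_interrupt_handler( SW_INT_NUM, prvSwIntHandler, NULL );
        xt_ints_on( 1U << SW_INT_NUM );      /* enable the interrupt on this core */
        xt_set_intset( 1U << SW_INT_NUM );   /* latch the software interrupt */
    }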
- * ------------------------------------------------------------------------------- - */ -extern void * xt_get_interrupt_handler_arg( int n ); - -#endif /* __XTENSA_API_H__ */ +#include diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_context.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_context.h index 10016506510..fc24748c446 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_context.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_context.h @@ -27,373 +27,4 @@ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - -/******************************************************************************* - - XTENSA CONTEXT FRAMES AND MACROS FOR RTOS ASSEMBLER SOURCES - -This header contains definitions and macros for use primarily by Xtensa -RTOS assembly coded source files. It includes and uses the Xtensa hardware -abstraction layer (HAL) to deal with config specifics. It may also be -included in C source files. - -!! Supports only Xtensa Exception Architecture 2 (XEA2). XEA1 not supported. !! - -NOTE: The Xtensa architecture requires stack pointer alignment to 16 bytes. - -*******************************************************************************/ - -#ifndef XTENSA_CONTEXT_H -#define XTENSA_CONTEXT_H - -#ifdef __ASSEMBLER__ -#include -#endif - -#include -#include -#include -#include -#include - - -/* Align a value up to nearest n-byte boundary, where n is a power of 2. */ -#define ALIGNUP(n, val) (((val) + (n)-1) & -(n)) - - -/* -------------------------------------------------------------------------------- - Macros that help define structures for both C and assembler. -------------------------------------------------------------------------------- -*/ - -#ifdef STRUCT_BEGIN -#undef STRUCT_BEGIN -#undef STRUCT_FIELD -#undef STRUCT_AFIELD -#undef STRUCT_END -#endif - -#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__) - -#define STRUCT_BEGIN .pushsection .text; .struct 0 -#define STRUCT_FIELD(ctype,size,asname,name) asname: .space size -#define STRUCT_AFIELD(ctype,size,asname,name,n) asname: .space (size)*(n) -#define STRUCT_END(sname) sname##Size:; .popsection - -#else - -#define STRUCT_BEGIN typedef struct { -#define STRUCT_FIELD(ctype,size,asname,name) ctype name; -#define STRUCT_AFIELD(ctype,size,asname,name,n) ctype name[n]; -#define STRUCT_END(sname) } sname; - -#endif //_ASMLANGUAGE || __ASSEMBLER__ - - -/* -------------------------------------------------------------------------------- - INTERRUPT/EXCEPTION STACK FRAME FOR A THREAD OR NESTED INTERRUPT - - A stack frame of this structure is allocated for any interrupt or exception. - It goes on the current stack. If the RTOS has a system stack for handling - interrupts, every thread stack must allow space for just one interrupt stack - frame, then nested interrupt stack frames go on the system stack. - - The frame includes basic registers (explicit) and "extra" registers introduced - by user TIE or the use of the MAC16 option in the user's Xtensa config. - The frame size is minimized by omitting regs not applicable to user's config. - - For Windowed ABI, this stack frame includes the interruptee's base save area, - another base save area to manage gcc nested functions, and a little temporary - space to help manage the spilling of the register windows. 
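As a concrete illustration of the dual-mode STRUCT_* macros defined earlier in this header (only their expansion, nothing new): STRUCT_FIELD (long, 4, XT_STK_PC, pc) becomes the C member "long pc;" when the header is compiled as C, and the assembler line "XT_STK_PC: .space 4" when it is assembled, so the single frame definition below yields both the C structure and the symbolic offsets used by the assembly sources.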
-------------------------------------------------------------------------------- -*/ - -STRUCT_BEGIN -STRUCT_FIELD (long, 4, XT_STK_EXIT, exit) /* exit point for dispatch */ -STRUCT_FIELD (long, 4, XT_STK_PC, pc) /* return PC */ -STRUCT_FIELD (long, 4, XT_STK_PS, ps) /* return PS */ -STRUCT_FIELD (long, 4, XT_STK_A0, a0) -STRUCT_FIELD (long, 4, XT_STK_A1, a1) /* stack pointer before interrupt */ -STRUCT_FIELD (long, 4, XT_STK_A2, a2) -STRUCT_FIELD (long, 4, XT_STK_A3, a3) -STRUCT_FIELD (long, 4, XT_STK_A4, a4) -STRUCT_FIELD (long, 4, XT_STK_A5, a5) -STRUCT_FIELD (long, 4, XT_STK_A6, a6) -STRUCT_FIELD (long, 4, XT_STK_A7, a7) -STRUCT_FIELD (long, 4, XT_STK_A8, a8) -STRUCT_FIELD (long, 4, XT_STK_A9, a9) -STRUCT_FIELD (long, 4, XT_STK_A10, a10) -STRUCT_FIELD (long, 4, XT_STK_A11, a11) -STRUCT_FIELD (long, 4, XT_STK_A12, a12) -STRUCT_FIELD (long, 4, XT_STK_A13, a13) -STRUCT_FIELD (long, 4, XT_STK_A14, a14) -STRUCT_FIELD (long, 4, XT_STK_A15, a15) -STRUCT_FIELD (long, 4, XT_STK_SAR, sar) -STRUCT_FIELD (long, 4, XT_STK_EXCCAUSE, exccause) -STRUCT_FIELD (long, 4, XT_STK_EXCVADDR, excvaddr) -#if XCHAL_HAVE_LOOPS -STRUCT_FIELD (long, 4, XT_STK_LBEG, lbeg) -STRUCT_FIELD (long, 4, XT_STK_LEND, lend) -STRUCT_FIELD (long, 4, XT_STK_LCOUNT, lcount) -#endif -#ifndef __XTENSA_CALL0_ABI__ -/* Temporary space for saving stuff during window spill */ -STRUCT_FIELD (long, 4, XT_STK_TMP0, tmp0) -STRUCT_FIELD (long, 4, XT_STK_TMP1, tmp1) -STRUCT_FIELD (long, 4, XT_STK_TMP2, tmp2) -#endif -#ifdef XT_USE_SWPRI -/* Storage for virtual priority mask */ -STRUCT_FIELD (long, 4, XT_STK_VPRI, vpri) -#endif -#ifdef XT_USE_OVLY -/* Storage for overlay state */ -STRUCT_FIELD (long, 4, XT_STK_OVLY, ovly) -#endif -STRUCT_END(XtExcFrame) - -#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__) -#define XT_STK_NEXT1 XtExcFrameSize -#else -#define XT_STK_NEXT1 sizeof(XtExcFrame) -#endif - -/* Allocate extra storage if needed */ -#if XCHAL_EXTRA_SA_SIZE != 0 - -#if XCHAL_EXTRA_SA_ALIGN <= 16 -#define XT_STK_EXTRA ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1) -#else -/* If need more alignment than stack, add space for dynamic alignment */ -#define XT_STK_EXTRA (ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1) + XCHAL_EXTRA_SA_ALIGN) -#endif -#define XT_STK_NEXT2 (XT_STK_EXTRA + XCHAL_EXTRA_SA_SIZE) - -#else - -#define XT_STK_NEXT2 XT_STK_NEXT1 - -#endif - -/* -------------------------------------------------------------------------------- - This is the frame size. Add space for 4 registers (interruptee's base save - area) and some space for gcc nested functions if any. -------------------------------------------------------------------------------- -*/ -#define XT_STK_FRMSZ (ALIGNUP(0x10, XT_STK_NEXT2) + 0x20) - - -/* -------------------------------------------------------------------------------- - SOLICITED STACK FRAME FOR A THREAD - - A stack frame of this structure is allocated whenever a thread enters the - RTOS kernel intentionally (and synchronously) to submit to thread scheduling. - It goes on the current thread's stack. - - The solicited frame only includes registers that are required to be preserved - by the callee according to the compiler's ABI conventions, some space to save - the return address for returning to the caller, and the caller's PS register. - - For Windowed ABI, this stack frame includes the caller's base save area. - - Note on XT_SOL_EXIT field: - It is necessary to distinguish a solicited from an interrupt stack frame. 
- This field corresponds to XT_STK_EXIT in the interrupt stack frame and is - always at the same offset (0). It can be written with a code (usually 0) - to distinguish a solicted frame from an interrupt frame. An RTOS port may - opt to ignore this field if it has another way of distinguishing frames. -------------------------------------------------------------------------------- -*/ - -STRUCT_BEGIN -#ifdef __XTENSA_CALL0_ABI__ -STRUCT_FIELD (long, 4, XT_SOL_EXIT, exit) -STRUCT_FIELD (long, 4, XT_SOL_PC, pc) -STRUCT_FIELD (long, 4, XT_SOL_PS, ps) -STRUCT_FIELD (long, 4, XT_SOL_NEXT, next) -STRUCT_FIELD (long, 4, XT_SOL_A12, a12) /* should be on 16-byte alignment */ -STRUCT_FIELD (long, 4, XT_SOL_A13, a13) -STRUCT_FIELD (long, 4, XT_SOL_A14, a14) -STRUCT_FIELD (long, 4, XT_SOL_A15, a15) -#else -STRUCT_FIELD (long, 4, XT_SOL_EXIT, exit) -STRUCT_FIELD (long, 4, XT_SOL_PC, pc) -STRUCT_FIELD (long, 4, XT_SOL_PS, ps) -STRUCT_FIELD (long, 4, XT_SOL_NEXT, next) -STRUCT_FIELD (long, 4, XT_SOL_A0, a0) /* should be on 16-byte alignment */ -STRUCT_FIELD (long, 4, XT_SOL_A1, a1) -STRUCT_FIELD (long, 4, XT_SOL_A2, a2) -STRUCT_FIELD (long, 4, XT_SOL_A3, a3) -#endif -STRUCT_END(XtSolFrame) - -/* Size of solicited stack frame */ -#define XT_SOL_FRMSZ ALIGNUP(0x10, XtSolFrameSize) - - -/* -------------------------------------------------------------------------------- - CO-PROCESSOR STATE SAVE AREA FOR A THREAD - - The RTOS must provide an area per thread to save the state of co-processors - when that thread does not have control. Co-processors are context-switched - lazily (on demand) only when a new thread uses a co-processor instruction, - otherwise a thread retains ownership of the co-processor even when it loses - control of the processor. An Xtensa co-processor exception is triggered when - any co-processor instruction is executed by a thread that is not the owner, - and the context switch of that co-processor is then peformed by the handler. - Ownership represents which thread's state is currently in the co-processor. - - Co-processors may not be used by interrupt or exception handlers. If an - co-processor instruction is executed by an interrupt or exception handler, - the co-processor exception handler will trigger a kernel panic and freeze. - This restriction is introduced to reduce the overhead of saving and restoring - co-processor state (which can be quite large) and in particular remove that - overhead from interrupt handlers. - - The co-processor state save area may be in any convenient per-thread location - such as in the thread control block or above the thread stack area. It need - not be in the interrupt stack frame since interrupts don't use co-processors. - - Along with the save area for each co-processor, two bitmasks with flags per - co-processor (laid out as in the CPENABLE reg) help manage context-switching - co-processors as efficiently as possible: - - XT_CPENABLE - The contents of a non-running thread's CPENABLE register. - It represents the co-processors owned (and whose state is still needed) - by the thread. When a thread is preempted, its CPENABLE is saved here. - When a thread solicits a context-swtich, its CPENABLE is cleared - the - compiler has saved the (caller-saved) co-proc state if it needs to. - When a non-running thread loses ownership of a CP, its bit is cleared. - When a thread runs, it's XT_CPENABLE is loaded into the CPENABLE reg. - Avoids co-processor exceptions when no change of ownership is needed. 
- - XT_CPSTORED - A bitmask with the same layout as CPENABLE, a bit per co-processor. - Indicates whether the state of each co-processor is saved in the state - save area. When a thread enters the kernel, only the state of co-procs - still enabled in CPENABLE is saved. When the co-processor exception - handler assigns ownership of a co-processor to a thread, it restores - the saved state only if this bit is set, and clears this bit. - - XT_CP_CS_ST - A bitmask with the same layout as CPENABLE, a bit per co-processor. - Indicates whether callee-saved state is saved in the state save area. - Callee-saved state is saved by itself on a solicited context switch, - and restored when needed by the coprocessor exception handler. - Unsolicited switches will cause the entire coprocessor to be saved - when necessary. - - XT_CP_ASA - Pointer to the aligned save area. Allows it to be aligned more than - the overall save area (which might only be stack-aligned or TCB-aligned). - Especially relevant for Xtensa cores configured with a very large data - path that requires alignment greater than 16 bytes (ABI stack alignment). -------------------------------------------------------------------------------- -*/ - -#if XCHAL_CP_NUM > 0 - -/* Offsets of each coprocessor save area within the 'aligned save area': */ -#define XT_CP0_SA 0 -#define XT_CP1_SA ALIGNUP(XCHAL_CP1_SA_ALIGN, XT_CP0_SA + XCHAL_CP0_SA_SIZE) -#define XT_CP2_SA ALIGNUP(XCHAL_CP2_SA_ALIGN, XT_CP1_SA + XCHAL_CP1_SA_SIZE) -#define XT_CP3_SA ALIGNUP(XCHAL_CP3_SA_ALIGN, XT_CP2_SA + XCHAL_CP2_SA_SIZE) -#define XT_CP4_SA ALIGNUP(XCHAL_CP4_SA_ALIGN, XT_CP3_SA + XCHAL_CP3_SA_SIZE) -#define XT_CP5_SA ALIGNUP(XCHAL_CP5_SA_ALIGN, XT_CP4_SA + XCHAL_CP4_SA_SIZE) -#define XT_CP6_SA ALIGNUP(XCHAL_CP6_SA_ALIGN, XT_CP5_SA + XCHAL_CP5_SA_SIZE) -#define XT_CP7_SA ALIGNUP(XCHAL_CP7_SA_ALIGN, XT_CP6_SA + XCHAL_CP6_SA_SIZE) -#define XT_CP_SA_SIZE ALIGNUP(16, XT_CP7_SA + XCHAL_CP7_SA_SIZE) - -/* Offsets within the overall save area: */ -#define XT_CPENABLE 0 /* (2 bytes) coprocessors active for this thread */ -#define XT_CPSTORED 2 /* (2 bytes) coprocessors saved for this thread */ -#define XT_CP_CS_ST 4 /* (2 bytes) coprocessor callee-saved regs stored for this thread */ -#define XT_CP_ASA 8 /* (4 bytes) ptr to aligned save area */ -/* Overall size allows for dynamic alignment: */ -#define XT_CP_SIZE (12 + XT_CP_SA_SIZE + XCHAL_TOTAL_SA_ALIGN) -#else -#define XT_CP_SIZE 0 -#endif - - -/* - Macro to get the current core ID. Only uses the reg given as an argument. - Reading PRID on the ESP32 gives us 0xCDCD on the PRO processor (0) - and 0xABAB on the APP CPU (1). We can distinguish between the two by checking - bit 13: it's 1 on the APP and 0 on the PRO processor. -*/ -#ifdef __ASSEMBLER__ - .macro getcoreid reg - rsr.prid \reg - extui \reg,\reg,13,1 - .endm -#endif - -#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0)) -#define CORE_ID_PRO 0xCDCD -#define CORE_ID_APP 0xABAB -#else -#define CORE_ID_REGVAL_PRO 0xCDCD -#define CORE_ID_REGVAL_APP 0xABAB - -/* Included for compatibility, recommend using CORE_ID_REGVAL_PRO instead */ -#define CORE_ID_PRO CORE_ID_REGVAL_PRO - -/* Included for compatibility, recommend using CORE_ID_REGVAL_APP instead */ -#define CORE_ID_APP CORE_ID_REGVAL_APP -#endif - -/* -------------------------------------------------------------------------------- - MACROS TO HANDLE ABI SPECIFICS OF FUNCTION ENTRY AND RETURN - - Convenient where the frame size requirements are the same for both ABIs. 
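For readers following the getcoreid assembler macro documented above: the same derivation, written in C, looks roughly like the sketch below. It is illustrative only; after this patch the port obtains the core ID through cpu_hal_get_core_id() rather than by reading PRID directly.

    static inline uint32_t prvCoreIdFromPrid( void )
    {
        uint32_t ulPrid;

        __asm__ volatile ( "rsr.prid %0" : "=r" ( ulPrid ) );

        /* Bit 13 of PRID is 0 on the PRO CPU (0xCDCD) and 1 on the APP CPU (0xABAB). */
        return ( ulPrid >> 13 ) & 1U;
    }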
- ENTRY(sz), RET(sz) are for framed functions (have locals or make calls). - ENTRY0, RET0 are for frameless functions (no locals, no calls). - - where size = size of stack frame in bytes (must be >0 and aligned to 16). - For framed functions the frame is created and the return address saved at - base of frame (Call0 ABI) or as determined by hardware (Windowed ABI). - For frameless functions, there is no frame and return address remains in a0. - Note: Because CPP macros expand to a single line, macros requiring multi-line - expansions are implemented as assembler macros. -------------------------------------------------------------------------------- -*/ - -#ifdef __ASSEMBLER__ -#ifdef __XTENSA_CALL0_ABI__ - /* Call0 */ - #define ENTRY(sz) entry1 sz - .macro entry1 size=0x10 - addi sp, sp, -\size - s32i a0, sp, 0 - .endm - #define ENTRY0 - #define RET(sz) ret1 sz - .macro ret1 size=0x10 - l32i a0, sp, 0 - addi sp, sp, \size - ret - .endm - #define RET0 ret -#else - /* Windowed */ - #define ENTRY(sz) entry sp, sz - #define ENTRY0 entry sp, 0x10 - #define RET(sz) retw - #define RET0 retw -#endif -#endif - - - - - -#endif /* XTENSA_CONTEXT_H */ - +#include diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_rtos.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_rtos.h index fde7fccdd0d..4fe4483dded 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_rtos.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_rtos.h @@ -59,7 +59,7 @@ #include #include - +#include "sdkconfig.h" /* * Include any RTOS specific definitions that are needed by this header. */ @@ -154,7 +154,9 @@ * RTOS may optionally define XT_TICK_PER_SEC in its own way (eg. macro). */ /* void XT_RTOS_TIMER_INT(void) */ -#define XT_RTOS_TIMER_INT _frxt_timer_int +#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT +#define XT_RTOS_TIMER_INT _frxt_timer_int +#endif #define XT_TICK_PER_SEC configTICK_RATE_HZ /* diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/port.c b/portable/ThirdParty/GCC/Xtensa_ESP32/port.c index 7675591500e..00674744ef1 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/port.c +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/port.c @@ -1,5 +1,6 @@ /* * SPDX-FileCopyrightText: 2020 Amazon.com, Inc. or its affiliates + * SPDX-FileCopyrightText: 2015-2019 Cadence Design Systems, Inc. * * SPDX-License-Identifier: MIT * @@ -32,32 +33,31 @@ * * 1 tab == 4 spaces! */ - -/******************************************************************************* - * // Copyright (c) 2003-2015 Cadence Design Systems, Inc. - * // - * // Permission is hereby granted, free of charge, to any person obtaining - * // a copy of this software and associated documentation files (the - * // "Software"), to deal in the Software without restriction, including - * // without limitation the rights to use, copy, modify, merge, publish, - * // distribute, sublicense, and/or sell copies of the Software, and to - * // permit persons to whom the Software is furnished to do so, subject to - * // the following conditions: - * // - * // The above copyright notice and this permission notice shall be included - * // in all copies or substantial portions of the Software. - * // - * // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * ----------------------------------------------------------------------------- +/* + * Copyright (c) 2015-2019 Cadence Design Systems, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include +#include #include #include "xtensa_rtos.h" @@ -68,7 +68,9 @@ #include "esp_panic.h" #include "esp_crosscore_int.h" #else -#if CONFIG_IDF_TARGET_ESP32S2 +#if CONFIG_IDF_TARGET_ESP32S3 + #include "esp32s3/rom/ets_sys.h" +#elif CONFIG_IDF_TARGET_ESP32S2 #include "esp32s2/rom/ets_sys.h" #elif CONFIG_IDF_TARGET_ESP32 #include "esp32/rom/ets_sys.h" @@ -87,23 +89,16 @@ #include "esp_intr_alloc.h" -/* Defined in portasm.h */ -extern void _frxt_tick_timer_init( void ); +#include "port_systick.h" /* Defined in xtensa_context.S */ extern void _xt_coproc_init( void ); - -#if CONFIG_FREERTOS_CORETIMER_0 - #define SYSTICK_INTR_ID ( ETS_INTERNAL_TIMER0_INTR_SOURCE + ETS_INTERNAL_INTR_SOURCE_OFF ) -#endif -#if CONFIG_FREERTOS_CORETIMER_1 - #define SYSTICK_INTR_ID ( ETS_INTERNAL_TIMER1_INTR_SOURCE + ETS_INTERNAL_INTR_SOURCE_OFF ) -#endif +_Static_assert(tskNO_AFFINITY == CONFIG_FREERTOS_NO_AFFINITY, "incorrect tskNO_AFFINITY value"); /*-----------------------------------------------------------*/ -unsigned port_xSchedulerRunning[ portNUM_PROCESSORS ] = { 0 }; /* Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting */ +extern volatile int port_xSchedulerRunning[portNUM_PROCESSORS]; unsigned port_interruptNesting[ portNUM_PROCESSORS ] = { 0 }; /* Interrupt nesting level. 
Increased/decreased in portasm.c, _frxt_int_enter/_frxt_int_exit */ /*-----------------------------------------------------------*/ @@ -148,8 +143,28 @@ void _xt_user_exit( void ); uint32_t * p; #endif + uint32_t * threadptr; + void * task_thread_local_start; + extern int _thread_local_start, _thread_local_end, _flash_rodata_start, _flash_rodata_align; + + /* TODO: check that TLS area fits the stack */ + uint32_t thread_local_sz = ( uint8_t * ) &_thread_local_end - ( uint8_t * ) &_thread_local_start; + + thread_local_sz = ALIGNUP( 0x10, thread_local_sz ); + + /* Initialize task's stack so that we have the following structure at the top: + + ----LOW ADDRESSES ----------------------------------------HIGH ADDRESSES---------- + task stack | interrupt stack frame | thread local vars | co-processor save area | + ---------------------------------------------------------------------------------- + | | + SP pxTopOfStack + + All parts are aligned to 16 byte boundary. + */ + /* Create interrupt stack frame aligned to 16 byte boundary */ - sp = ( StackType_t * ) ( ( ( UBaseType_t ) pxTopOfStack - XT_CP_SIZE - XT_STK_FRMSZ ) & ~0xf ); + sp = ( StackType_t * ) ( ( ( UBaseType_t ) pxTopOfStack - XT_CP_SIZE - thread_local_sz - XT_STK_FRMSZ ) & ~0xf ); /* Clear the entire frame (do not use memset() because we don't depend on C library) */ for( tp = sp; tp <= pxTopOfStack; ++tp ) @@ -195,6 +210,24 @@ void _xt_user_exit( void ); frame->vpri = 0xFFFFFFFF; #endif + /* Init threadptr register and set up TLS run-time area. */ + task_thread_local_start = ( void * ) ( ( ( uint32_t ) pxTopOfStack - XT_CP_SIZE - thread_local_sz ) & ~0xf ); + memcpy( task_thread_local_start, &_thread_local_start, thread_local_sz ); + threadptr = ( uint32_t * ) ( sp + XT_STK_EXTRA ); + + /* Calculate THREADPTR value. + * The generated code will add THREADPTR value to a constant value determined at link time, + * to get the address of the TLS variable. + * The constant value is calculated by the linker as follows + * (search for 'tpoff' in elf32-xtensa.c in BFD): + * offset = address - tls_section_vma + align_up(TCB_SIZE, tls_section_alignment) + * where TCB_SIZE is hardcoded to 8. + */ + const uint32_t tls_section_alignment = ( uint32_t ) &_flash_rodata_align; /* ALIGN value of .flash.rodata section */ + const uint32_t tcb_size = 8; /* Unrelated to FreeRTOS, this is the constant from BFD */ + const uint32_t base = ( tcb_size + tls_section_alignment - 1 ) & ( ~( tls_section_alignment - 1 ) ); + *threadptr = ( uint32_t ) task_thread_local_start - ( ( uint32_t ) &_thread_local_start - ( uint32_t ) &_flash_rodata_start ) - base; + #if XCHAL_CP_NUM > 0 /* Init the coprocessor save area (see xtensa_context.h) */ @@ -217,12 +250,14 @@ void vPortEndScheduler( void ) { /* It is unlikely that the Xtensa port will get stopped. If required simply * disable the tick interrupt here. */ + abort(); } /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) { + portDISABLE_INTERRUPTS(); /* Interrupts are disabled at this point and stack contains PS with enabled interrupts when task context is restored */ #if XCHAL_CP_NUM > 0 @@ -230,11 +265,21 @@ BaseType_t xPortStartScheduler( void ) _xt_coproc_init(); #endif - /* Init the tick divisor value */ - _xt_tick_divisor_init(); - - /* Setup the hardware to generate the tick. 
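Returning to the THREADPTR value computed in pxPortInitialiseStack() above, it can be checked by substituting it into the address the compiler generates for a TLS access, THREADPTR + offset, where (per the BFD comment) offset = address - tls_section_vma + base, and tls_section_vma is taken to be _flash_rodata_start, which is what the subtraction in the code implies:

    THREADPTR + offset
      = ( task_thread_local_start - ( _thread_local_start - _flash_rodata_start ) - base )
        + ( address - _flash_rodata_start + base )
      = task_thread_local_start + ( address - _thread_local_start )

so every thread-local reference resolves to the variable's offset inside the per-task copy of the TLS template that pxPortInitialiseStack() placed just above the interrupt stack frame.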
*/ - _frxt_tick_timer_init(); + /* Setup the hardware to generate the tick */ + vPortSetupTimer(); + + /* NOTE: For ESP32-S3, vPortSetupTimer allocates an interrupt for the + * systimer which is used as the source for FreeRTOS systick. + * + * The behaviour of portEXIT_CRITICAL is different in FreeRTOS and ESP-IDF - + * the former enables the interrupts no matter what the state was at the beginning + * of the call while the latter restores the interrupt state to what was at the + * beginning of the call. + * + * This resulted in the interrupts being enabled before the _frxt_dispatch call, + * who was unable to switch context to the queued tasks. + */ + portDISABLE_INTERRUPTS(); port_xSchedulerRunning[ xPortGetCoreID() ] = 1; @@ -244,37 +289,8 @@ BaseType_t xPortStartScheduler( void ) /* Should not get here. */ return pdTRUE; } -/*-----------------------------------------------------------*/ - -BaseType_t xPortSysTickHandler( void ) -{ - BaseType_t ret; - unsigned interruptMask; - - portbenchmarkIntLatency(); - traceISR_ENTER( SYSTICK_INTR_ID ); - - /* Interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY must be - * disabled before calling xTaskIncrementTick as it access the - * kernel lists. */ - interruptMask = portSET_INTERRUPT_MASK_FROM_ISR(); - { - ret = xTaskIncrementTick(); - } - portCLEAR_INTERRUPT_MASK_FROM_ISR( interruptMask ); - - if( ret != pdFALSE ) - { - portYIELD_FROM_ISR(); - } - else - { - traceISR_EXIT(); - } - - return ret; -} +/*-----------------------------------------------------------*/ void vPortYieldOtherCore( BaseType_t coreid ) { @@ -297,7 +313,6 @@ void vPortYieldOtherCore( BaseType_t coreid ) xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) xMPUSettings->coproc_area ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( uint32_t ) xMPUSettings->coproc_area - XT_CP_SIZE ) & ~0xf ); - /* NOTE: we cannot initialize the coprocessor save area here because FreeRTOS is going to * clear the stack area after we return. This is done in pxPortInitialiseStack(). */ @@ -321,9 +336,9 @@ BaseType_t xPortInIsrContext() unsigned int irqStatus; BaseType_t ret; - irqStatus = portENTER_CRITICAL_NESTED(); + irqStatus = portSET_INTERRUPT_MASK_FROM_ISR(); ret = ( port_interruptNesting[ xPortGetCoreID() ] != 0 ); - portEXIT_CRITICAL_NESTED( irqStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( irqStatus ); return ret; } @@ -336,11 +351,39 @@ BaseType_t IRAM_ATTR xPortInterruptedFromISRContext() return( port_interruptNesting[ xPortGetCoreID() ] != 0 ); } +void IRAM_ATTR vPortEvaluateYieldFromISR( int argc, ... 
) +{ + BaseType_t xYield; + va_list ap; + va_start( ap, argc ); + + if( argc ) + { + xYield = ( BaseType_t )va_arg( ap, int ); + va_end( ap ); + } + else + { + //it is a empty parameter vPortYieldFromISR macro call: + va_end( ap ); + traceISR_EXIT_TO_SCHEDULER(); + _frxt_setup_switch(); + return; + } + + //Yield exists, so need evaluate it first then switch: + if( xYield == pdTRUE ) + { + traceISR_EXIT_TO_SCHEDULER(); + _frxt_setup_switch(); + } +} + void vPortAssertIfInISR() { if( xPortInIsrContext() ) { - ets_printf( "core=%d port_interruptNesting=%d\n\n", xPortGetCoreID(), port_interruptNesting[ xPortGetCoreID() ] ); + esp_rom_printf( "core=%d port_interruptNesting=%d\n\n", xPortGetCoreID(), port_interruptNesting[ xPortGetCoreID() ] ); } configASSERT( !xPortInIsrContext() ); @@ -352,7 +395,7 @@ void vPortAssertIfInISR() void vPortCPUInitializeMutex( portMUX_TYPE * mux ) { #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG - ets_printf( "Initializing mux %p\n", mux ); + esp_rom_printf( "Initializing mux %p\n", mux ); mux->lastLockedFn = "(never locked)"; mux->lastLockedLine = -1; #endif @@ -370,10 +413,10 @@ void vPortCPUInitializeMutex( portMUX_TYPE * mux ) const char * fnName, int line ) { - unsigned int irqStatus = portENTER_CRITICAL_NESTED(); + unsigned int irqStatus = portSET_INTERRUPT_MASK_FROM_ISR(); vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT, fnName, line ); - portEXIT_CRITICAL_NESTED( irqStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( irqStatus ); } bool vPortCPUAcquireMutexTimeout( portMUX_TYPE * mux, @@ -381,29 +424,29 @@ void vPortCPUInitializeMutex( portMUX_TYPE * mux ) const char * fnName, int line ) { - unsigned int irqStatus = portENTER_CRITICAL_NESTED(); + unsigned int irqStatus = portSET_INTERRUPT_MASK_FROM_ISR(); bool result = vPortCPUAcquireMutexIntsDisabled( mux, timeout_cycles, fnName, line ); - portEXIT_CRITICAL_NESTED( irqStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( irqStatus ); return result; } #else /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */ void vPortCPUAcquireMutex( portMUX_TYPE * mux ) { - unsigned int irqStatus = portENTER_CRITICAL_NESTED(); + unsigned int irqStatus = portSET_INTERRUPT_MASK_FROM_ISR(); vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT ); - portEXIT_CRITICAL_NESTED( irqStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( irqStatus ); } bool vPortCPUAcquireMutexTimeout( portMUX_TYPE * mux, int timeout_cycles ) { - unsigned int irqStatus = portENTER_CRITICAL_NESTED(); + unsigned int irqStatus = portSET_INTERRUPT_MASK_FROM_ISR(); bool result = vPortCPUAcquireMutexIntsDisabled( mux, timeout_cycles ); - portEXIT_CRITICAL_NESTED( irqStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( irqStatus ); return result; } #endif /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */ @@ -419,21 +462,24 @@ void vPortCPUInitializeMutex( portMUX_TYPE * mux ) const char * fnName, int line ) { - unsigned int irqStatus = portENTER_CRITICAL_NESTED(); + unsigned int irqStatus = portSET_INTERRUPT_MASK_FROM_ISR(); vPortCPUReleaseMutexIntsDisabled( mux, fnName, line ); - portEXIT_CRITICAL_NESTED( irqStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( irqStatus ); } #else void vPortCPUReleaseMutex( portMUX_TYPE * mux ) { - unsigned int irqStatus = portENTER_CRITICAL_NESTED(); + unsigned int irqStatus = portSET_INTERRUPT_MASK_FROM_ISR(); vPortCPUReleaseMutexIntsDisabled( mux ); - portEXIT_CRITICAL_NESTED( irqStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( irqStatus ); } #endif /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */ +#define STACK_WATCH_AREA_SIZE ( 32 ) +#define STACK_WATCH_POINT_NUMBER ( 
SOC_CPU_WATCHPOINTS_NUM - 1 ) + void vPortSetStackWatchpoint( void * pxStackStart ) { /*Set watchpoint 1 to watch the last 32 bytes of the stack. */ @@ -445,7 +491,7 @@ void vPortSetStackWatchpoint( void * pxStackStart ) int addr = ( int ) pxStackStart; addr = ( addr + 31 ) & ( ~31 ); - esp_set_watchpoint( 1, ( char * ) addr, 32, ESP_WATCHPOINT_STORE ); + esp_cpu_set_watchpoint( STACK_WATCH_POINT_NUMBER, (char*)addr, 32, ESP_WATCHPOINT_STORE ); } #if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0)) @@ -463,7 +509,7 @@ void vPortSetStackWatchpoint( void * pxStackStart ) { uint32_t prev; - uint32_t oldlevel = portENTER_CRITICAL_NESTED(); + uint32_t oldlevel = portSET_INTERRUPT_MASK_FROM_ISR(); #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG vPortCPUAcquireMutexIntsDisabled( &extram_mux, portMUX_NO_TIMEOUT, __FUNCTION__, __LINE__ ); @@ -484,7 +530,7 @@ void vPortSetStackWatchpoint( void * pxStackStart ) vPortCPUReleaseMutexIntsDisabled( &extram_mux ); #endif - portEXIT_CRITICAL_NESTED(oldlevel); + portCLEAR_INTERRUPT_MASK_FROM_ISR(oldlevel); } #endif //defined(CONFIG_SPIRAM_SUPPORT) @@ -495,3 +541,28 @@ uint32_t xPortGetTickRateHz( void ) { return ( uint32_t ) configTICK_RATE_HZ; } + +// For now, running FreeRTOS on one core and a bare metal on the other (or other OSes) +// is not supported. For now CONFIG_FREERTOS_UNICORE and CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE +// should mirror each other's values. +// +// And since this should be true, we can just check for CONFIG_FREERTOS_UNICORE. +#if CONFIG_FREERTOS_UNICORE != CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE + #error "FreeRTOS and system configuration mismatch regarding the use of multiple cores." +#endif + +extern void esp_startup_start_app_common(void); + +void esp_startup_start_app(void) +{ +#if !CONFIG_ESP_INT_WDT +#if CONFIG_ESP32_ECO3_CACHE_LOCK_FIX + assert(!soc_has_cache_lock_bug() && "ESP32 Rev 3 + Dual Core + PSRAM requires INT WDT enabled in project config!"); +#endif +#endif + + esp_startup_start_app_common(); + + ESP_LOGI("cpu_start", "Starting scheduler on PRO CPU."); + vTaskStartScheduler(); +} diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/port_common.c b/portable/ThirdParty/GCC/Xtensa_ESP32/port_common.c new file mode 100644 index 00000000000..11f119bcde3 --- /dev/null +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/port_common.c @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include "FreeRTOS.h" +#include "task.h" +#include "portmacro.h" +#include "esp_system.h" +#include "esp_heap_caps_init.h" +#include "esp_int_wdt.h" +#include "esp_task_wdt.h" +#include "esp_task.h" +#include "esp_private/crosscore_int.h" +#include "esp_private/startup_internal.h" /* Required by g_spiram_ok. [refactor-todo] for g_spiram_ok */ +#include "esp_log.h" +#include "soc/soc_memory_types.h" +#include "soc/dport_access.h" +#include "sdkconfig.h" +#include "esp_freertos_hooks.h" + +#if CONFIG_IDF_TARGET_ESP32 +#include "esp32/spiram.h" +#elif CONFIG_IDF_TARGET_ESP32S2 +#include "esp32s2/spiram.h" +#elif CONFIG_IDF_TARGET_ESP32S3 +#include "esp32s3/spiram.h" +#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2 +// SPIRAM is not supported on ESP32-C3 +#endif + +#if CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL +static const char* TAG = "cpu_start"; +#endif + +/* Architecture-agnostic parts of the FreeRTOS ESP-IDF port layer can go here. 
+ * + * The actual call flow will be to call esp_startup_start_app() in /port.c, + * which will then call esp_startup_start_app_common() + */ + +// Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting +volatile unsigned port_xSchedulerRunning[portNUM_PROCESSORS] = {0}; + +// For now, running FreeRTOS on one core and a bare metal on the other (or other OSes) +// is not supported. For now CONFIG_FREERTOS_UNICORE and CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE +// should mirror each other's values. +// +// And since this should be true, we can just check for CONFIG_FREERTOS_UNICORE. +#if CONFIG_FREERTOS_UNICORE != CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE + #error "FreeRTOS and system configuration mismatch regarding the use of multiple cores." +#endif + +static void main_task(void* args); + +#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME +void esp_gdbstub_init(void); +#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME + +extern void app_main(void); + +void esp_startup_start_app_common(void) +{ +#if CONFIG_ESP_INT_WDT + esp_int_wdt_init(); + //Initialize the interrupt watch dog for CPU0. + esp_int_wdt_cpu_init(); +#endif + + esp_crosscore_int_init(); + +#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME + esp_gdbstub_init(); +#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME + + portBASE_TYPE res = xTaskCreatePinnedToCore(&main_task, "main", + ESP_TASK_MAIN_STACK, NULL, + ESP_TASK_MAIN_PRIO, NULL, ESP_TASK_MAIN_CORE); + assert(res == pdTRUE); + (void)res; +} + +#if !CONFIG_FREERTOS_UNICORE +static volatile bool s_other_cpu_startup_done = false; +static bool other_cpu_startup_idle_hook_cb(void) +{ + s_other_cpu_startup_done = true; + return true; +} +#endif + +static void main_task(void* args) +{ +#if !CONFIG_FREERTOS_UNICORE + // Wait for FreeRTOS initialization to finish on other core, before replacing its startup stack + esp_register_freertos_idle_hook_for_cpu(other_cpu_startup_idle_hook_cb, !xPortGetCoreID()); + while (!s_other_cpu_startup_done) { + ; + } + esp_deregister_freertos_idle_hook_for_cpu(other_cpu_startup_idle_hook_cb, !xPortGetCoreID()); +#endif + + // [refactor-todo] check if there is a way to move the following block to esp_system startup + heap_caps_enable_nonos_stack_heaps(); + + // Now we have startup stack RAM available for heap, enable any DMA pool memory +#if CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL + if (g_spiram_ok) { + esp_err_t r = esp_spiram_reserve_dma_pool(CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL); + if (r != ESP_OK) { + ESP_EARLY_LOGE(TAG, "Could not reserve internal/DMA pool (error 0x%x)", r); + abort(); + } + } +#endif + + //Initialize task wdt if configured to do so +#ifdef CONFIG_ESP_TASK_WDT_PANIC + ESP_ERROR_CHECK(esp_task_wdt_init(CONFIG_ESP_TASK_WDT_TIMEOUT_S, true)); +#elif CONFIG_ESP_TASK_WDT + ESP_ERROR_CHECK(esp_task_wdt_init(CONFIG_ESP_TASK_WDT_TIMEOUT_S, false)); +#endif + + //Add IDLE 0 to task wdt +#ifdef CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU0 + TaskHandle_t idle_0 = xTaskGetIdleTaskHandleForCPU(0); + if(idle_0 != NULL){ + ESP_ERROR_CHECK(esp_task_wdt_add(idle_0)); + } +#endif + //Add IDLE 1 to task wdt +#ifdef CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU1 + TaskHandle_t idle_1 = xTaskGetIdleTaskHandleForCPU(1); + if(idle_1 != NULL){ + ESP_ERROR_CHECK(esp_task_wdt_add(idle_1)); + } +#endif + + app_main(); + vTaskDelete(NULL); +} + +// -------------------- Heap Related ----------------------- + +bool xPortCheckValidTCBMem(const void *ptr) +{ + return esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr); +} + +bool xPortcheckValidStackMem(const void *ptr) +{ 
+#ifdef CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY + return esp_ptr_byte_accessible(ptr); +#else + return esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr); +#endif +} diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/port_systick.c b/portable/ThirdParty/GCC/Xtensa_ESP32/port_systick.c new file mode 100644 index 00000000000..a9f437c61f1 --- /dev/null +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/port_systick.c @@ -0,0 +1,174 @@ +/* + * SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include "soc/cpu.h" +#include "FreeRTOS.h" +#include "task.h" +#include "esp_intr_alloc.h" +#include "esp_err.h" +#include "esp_log.h" +#include "sdkconfig.h" +#ifdef CONFIG_FREERTOS_SYSTICK_USES_SYSTIMER +#include "soc/periph_defs.h" +#include "soc/system_reg.h" +#include "hal/systimer_hal.h" +#include "hal/systimer_ll.h" +#endif + +#ifdef CONFIG_PM_TRACE +#include "esp_private/pm_trace.h" +#endif //CONFIG_PM_TRACE + +BaseType_t xPortSysTickHandler(void); + +#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT +extern void _frxt_tick_timer_init(void); +extern void _xt_tick_divisor_init(void); + +#ifdef CONFIG_FREERTOS_CORETIMER_0 + #define SYSTICK_INTR_ID (ETS_INTERNAL_TIMER0_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF) +#endif +#ifdef CONFIG_FREERTOS_CORETIMER_1 + #define SYSTICK_INTR_ID (ETS_INTERNAL_TIMER1_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF) +#endif + +/** + * @brief Initialize CCONT timer to generate the tick interrupt + * + */ +void vPortSetupTimer(void) +{ + /* Init the tick divisor value */ + _xt_tick_divisor_init(); + + _frxt_tick_timer_init(); +} + + +#elif CONFIG_FREERTOS_SYSTICK_USES_SYSTIMER + +_Static_assert(SOC_CPU_CORES_NUM <= SOC_SYSTIMER_ALARM_NUM - 1, "the number of cores must match the number of core alarms in SYSTIMER"); + +void SysTickIsrHandler(void *arg); + +static uint32_t s_handled_systicks[portNUM_PROCESSORS] = { 0 }; + +#define SYSTICK_INTR_ID (ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE) + +/** + * @brief Set up the systimer peripheral to generate the tick interrupt + * + * Both timer alarms are configured in periodic mode. + * It is done at the same time so SysTicks for both CPUs occur at the same time or very close. + * Shifts a time of triggering interrupts for core 0 and core 1. 
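In concrete terms (illustrative value only, not from the patch): with CONFIG_FREERTOS_HZ set to 100, the alarm period programmed below is 1000000 / 100 = 10000 microseconds, and the counter advance applied on core 0 is half of that, 5000 microseconds, so that, as the code comments note, the SysTicks of core 0 and core 1 end up shifted by half a period rather than firing back to back.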
+ */ +void vPortSetupTimer(void) +{ + unsigned cpuid = xPortGetCoreID(); +#ifdef CONFIG_FREERTOS_CORETIMER_SYSTIMER_LVL3 + const unsigned level = ESP_INTR_FLAG_LEVEL3; +#else + const unsigned level = ESP_INTR_FLAG_LEVEL1; +#endif + /* Systimer HAL layer object */ + static systimer_hal_context_t systimer_hal; + /* set system timer interrupt vector */ + ESP_ERROR_CHECK(esp_intr_alloc(ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE + cpuid, ESP_INTR_FLAG_IRAM | level, SysTickIsrHandler, &systimer_hal, NULL)); + + if (cpuid == 0) { + systimer_hal_init(&systimer_hal); + systimer_ll_set_counter_value(systimer_hal.dev, SYSTIMER_LL_COUNTER_OS_TICK, 0); + systimer_ll_apply_counter_value(systimer_hal.dev, SYSTIMER_LL_COUNTER_OS_TICK); + + for (cpuid = 0; cpuid < SOC_CPU_CORES_NUM; cpuid++) { + systimer_hal_counter_can_stall_by_cpu(&systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, cpuid, false); + } + + for (cpuid = 0; cpuid < portNUM_PROCESSORS; ++cpuid) { + uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid; + + /* configure the timer */ + systimer_hal_connect_alarm_counter(&systimer_hal, alarm_id, SYSTIMER_LL_COUNTER_OS_TICK); + systimer_hal_set_alarm_period(&systimer_hal, alarm_id, 1000000UL / CONFIG_FREERTOS_HZ); + systimer_hal_select_alarm_mode(&systimer_hal, alarm_id, SYSTIMER_ALARM_MODE_PERIOD); + systimer_hal_counter_can_stall_by_cpu(&systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, cpuid, true); + if (cpuid == 0) { + systimer_hal_enable_alarm_int(&systimer_hal, alarm_id); + systimer_hal_enable_counter(&systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK); +#ifndef CONFIG_FREERTOS_UNICORE + // SysTick of core 0 and core 1 are shifted by half of period + systimer_hal_counter_value_advance(&systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, 1000000UL / CONFIG_FREERTOS_HZ / 2); +#endif + } + } + } else { + uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid; + systimer_hal_enable_alarm_int(&systimer_hal, alarm_id); + } +} + +/** + * @brief Systimer interrupt handler. + * + * The Systimer interrupt for SysTick works in periodic mode no need to calc the next alarm. + * If a timer interrupt is ever serviced more than one tick late, it is necessary to process multiple ticks. 
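A worked example of the catch-up logic implemented below (illustrative numbers): if the OS-tick counter divided by the alarm period reads 1003 while s_handled_systicks[cpuid] is 1000, then diff is 3, s_handled_systicks[cpuid] is advanced to 1003, and xPortSysTickHandler() is called three times so that xTaskIncrementTick() accounts for every elapsed tick. The very first interrupt after start-up is special-cased: s_handled_systicks[cpuid] is still 0, so it is seeded with the current count and only a single tick is processed.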
+ */ +IRAM_ATTR void SysTickIsrHandler(void *arg) +{ + uint32_t cpuid = xPortGetCoreID(); + systimer_hal_context_t *systimer_hal = (systimer_hal_context_t *)arg; +#ifdef CONFIG_PM_TRACE + ESP_PM_TRACE_ENTER(TICK, cpuid); +#endif + + uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid; + do { + systimer_ll_clear_alarm_int(systimer_hal->dev, alarm_id); + + uint32_t diff = systimer_hal_get_counter_value(systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK) / systimer_ll_get_alarm_period(systimer_hal->dev, alarm_id) - s_handled_systicks[cpuid]; + if (diff > 0) { + if (s_handled_systicks[cpuid] == 0) { + s_handled_systicks[cpuid] = diff; + diff = 1; + } else { + s_handled_systicks[cpuid] += diff; + } + + do { + xPortSysTickHandler(); + } while (--diff); + } + } while (systimer_ll_is_alarm_int_fired(systimer_hal->dev, alarm_id)); + +#ifdef CONFIG_PM_TRACE + ESP_PM_TRACE_EXIT(TICK, cpuid); +#endif +} + +#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT + +/** + * @brief Handler of SysTick + * + * The function is called from: + * - _frxt_timer_int for xtensa with CONFIG_FREERTOS_SYSTICK_USES_CCOUNT + * - SysTickIsrHandler for xtensa with CONFIG_FREERTOS_SYSTICK_USES_SYSTIMER + * - SysTickIsrHandler for riscv + */ +BaseType_t xPortSysTickHandler(void) +{ + portbenchmarkIntLatency(); + traceISR_ENTER(SYSTICK_INTR_ID); + BaseType_t ret = xTaskIncrementTick(); + if(ret != pdFALSE) { + portYIELD_FROM_ISR(); + } else { + traceISR_EXIT(); + } + return ret; +} diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/portasm.S b/portable/ThirdParty/GCC/Xtensa_ESP32/portasm.S index 27c4ff19c42..c7f65385719 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/portasm.S +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/portasm.S @@ -282,6 +282,7 @@ _frxt_int_exit: * ********************************************************************************************************** */ +#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT .globl _frxt_timer_int .type _frxt_timer_int,@function .align 4 @@ -333,7 +334,7 @@ _frxt_timer_int: s32i a3, sp, 8 #endif - /* Call the FreeRTOS tick handler (see port.c). */ + /* Call the FreeRTOS tick handler (see port_systick.c). */ #ifdef __XTENSA_CALL0_ABI__ call0 xPortSysTickHandler #else @@ -359,6 +360,7 @@ _frxt_timer_int: #endif // CONFIG_PM_TRACE RET(16) +#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT /* ********************************************************************************************************** @@ -370,6 +372,7 @@ _frxt_timer_int: * ********************************************************************************************************** */ +#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT .globl _frxt_tick_timer_init .type _frxt_tick_timer_init,@function .align 4 @@ -402,6 +405,7 @@ _frxt_tick_timer_init: #endif RET(16) +#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT /* ********************************************************************************************************** @@ -456,6 +460,10 @@ _frxt_dispatch: .L_frxt_dispatch_sol: /* Solicited stack frame. Restore minimal context and return from vPortYield(). 
*/ + #if XCHAL_HAVE_THREADPTR + l32i a2, sp, XT_SOL_THREADPTR + wur.threadptr a2 + #endif l32i a3, sp, XT_SOL_PS #ifdef __XTENSA_CALL0_ABI__ l32i a12, sp, XT_SOL_A12 @@ -543,6 +551,10 @@ vPortYield: rsr a2, PS s32i a0, sp, XT_SOL_PC s32i a2, sp, XT_SOL_PS + #if XCHAL_HAVE_THREADPTR + rur.threadptr a2 + s32i a2, sp, XT_SOL_THREADPTR + #endif #ifdef __XTENSA_CALL0_ABI__ s32i a12, sp, XT_SOL_A12 /* save callee-saved registers */ s32i a13, sp, XT_SOL_A13 diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_init.c b/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_init.c index 837dfbc3f29..e7275c6234f 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_init.c +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_init.c @@ -39,34 +39,37 @@ #ifdef XT_BOARD - #include +#include "xtensa/xtbsp.h" #endif -#include "xtensa_rtos.h" +#include "xtensa_rtos.h" +#include "sdkconfig.h" #include "esp_idf_version.h" #if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0)) #include "esp_clk.h" #else -#if CONFIG_IDF_TARGET_ESP32S2 -#include "esp32s2/clk.h" -#elif CONFIG_IDF_TARGET_ESP32 -#include "esp32/clk.h" +#if CONFIG_IDF_TARGET_ESP32 +#include "esp32/clk.h" +#elif CONFIG_IDF_TARGET_ESP32S2 +#include "esp32s2/clk.h" +#elif CONFIG_IDF_TARGET_ESP32S3 +#include "esp32s3/clk.h" #endif #endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */ #ifdef XT_RTOS_TIMER_INT - unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */ +unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */ - void _xt_tick_divisor_init( void ) - { - _xt_tick_divisor = esp_clk_cpu_freq() / XT_TICK_PER_SEC; - } +void _xt_tick_divisor_init(void) +{ + _xt_tick_divisor = esp_clk_cpu_freq() / XT_TICK_PER_SEC; +} /* Deprecated, to be removed */ - int xt_clock_freq( void ) - { - return esp_clk_cpu_freq(); - } +int xt_clock_freq(void) +{ + return esp_clk_cpu_freq(); +} #endif /* XT_RTOS_TIMER_INT */ diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_intr.c b/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_intr.c deleted file mode 100644 index d1882e4b040..00000000000 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_intr.c +++ /dev/null @@ -1,190 +0,0 @@ -/* - * SPDX-FileCopyrightText: 2015-2019 Cadence Design Systems, Inc. - * - * SPDX-License-Identifier: MIT - * - * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD - */ -/* - * Copyright (c) 2015-2019 Cadence Design Systems, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -/****************************************************************************** -* Xtensa-specific interrupt and exception functions for RTOS ports. -* Also see xtensa_intr_asm.S. -******************************************************************************/ - -#include - -#include - -#include "freertos/FreeRTOS.h" -#include "freertos/xtensa_api.h" -#include "freertos/portable.h" -#include "esp_idf_version.h" - -#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0)) -#include "rom/ets_sys.h" -#else -#if CONFIG_IDF_TARGET_ESP32S2 -#include "esp32s2/rom/ets_sys.h" -#elif CONFIG_IDF_TARGET_ESP32 -#include "esp32/rom/ets_sys.h" -#endif -#endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */ - -#if XCHAL_HAVE_EXCEPTIONS - -/* Handler table is in xtensa_intr_asm.S */ - - extern xt_exc_handler _xt_exception_table[ XCHAL_EXCCAUSE_NUM * portNUM_PROCESSORS ]; - - -/* - * Default handler for unhandled exceptions. - * CHANGED: We do this in panic.c now - */ - -/*void xt_unhandled_exception(XtExcFrame *frame) */ -/*{ */ - /*exit(-1); */ -/*} */ - extern void xt_unhandled_exception( XtExcFrame * frame ); - - -/* - * This function registers a handler for the specified exception. - * The function returns the address of the previous handler. - * On error, it returns 0. - */ - xt_exc_handler xt_set_exception_handler( int n, - xt_exc_handler f ) - { - xt_exc_handler old; - - if( ( n < 0 ) || ( n >= XCHAL_EXCCAUSE_NUM ) ) - { - return 0; /* invalid exception number */ - } - - /* Convert exception number to _xt_exception_table name */ - n = n * portNUM_PROCESSORS + xPortGetCoreID(); - old = _xt_exception_table[ n ]; - - if( f ) - { - _xt_exception_table[ n ] = f; - } - else - { - _xt_exception_table[ n ] = &xt_unhandled_exception; - } - - return( ( old == &xt_unhandled_exception ) ? 0 : old ); - } - -#endif /* if XCHAL_HAVE_EXCEPTIONS */ - -#if XCHAL_HAVE_INTERRUPTS - -/* Handler table is in xtensa_intr_asm.S */ - - typedef struct xt_handler_table_entry - { - void * handler; - void * arg; - } xt_handler_table_entry; - - extern xt_handler_table_entry _xt_interrupt_table[ XCHAL_NUM_INTERRUPTS * portNUM_PROCESSORS ]; - - -/* - * Default handler for unhandled interrupts. - */ - void xt_unhandled_interrupt( void * arg ) - { - ets_printf( "Unhandled interrupt %d on cpu %d!\n", ( int ) arg, xPortGetCoreID() ); - } - - -/* - * This function registers a handler for the specified interrupt. The "arg" - * parameter specifies the argument to be passed to the handler when it is - * invoked. The function returns the address of the previous handler. - * On error, it returns 0. - */ - xt_handler xt_set_interrupt_handler( int n, - xt_handler f, - void * arg ) - { - xt_handler_table_entry * entry; - xt_handler old; - - if( ( n < 0 ) || ( n >= XCHAL_NUM_INTERRUPTS ) ) - { - return 0; /* invalid interrupt number */ - } - - if( Xthal_intlevel[ n ] > XCHAL_EXCM_LEVEL ) - { - return 0; /* priority level too high to safely handle in C */ - } - - /* Convert exception number to _xt_exception_table name */ - n = n * portNUM_PROCESSORS + xPortGetCoreID(); - - entry = _xt_interrupt_table + n; - old = entry->handler; - - if( f ) - { - entry->handler = f; - entry->arg = arg; - } - else - { - entry->handler = &xt_unhandled_interrupt; - entry->arg = ( void * ) n; - } - - return( ( old == &xt_unhandled_interrupt ) ? 
0 : old ); - } - - #if CONFIG_SYSVIEW_ENABLE - void * xt_get_interrupt_handler_arg( int n ) - { - xt_handler_table_entry * entry; - - if( ( n < 0 ) || ( n >= XCHAL_NUM_INTERRUPTS ) ) - { - return 0; /* invalid interrupt number */ - } - - /* Convert exception number to _xt_exception_table name */ - n = n * portNUM_PROCESSORS + xPortGetCoreID(); - - entry = _xt_interrupt_table + n; - return entry->arg; - } - #endif /* if CONFIG_SYSVIEW_ENABLE */ - -#endif /* XCHAL_HAVE_INTERRUPTS */ diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_intr_asm.S b/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_intr_asm.S deleted file mode 100644 index 879721291bf..00000000000 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_intr_asm.S +++ /dev/null @@ -1,232 +0,0 @@ -/* - * SPDX-FileCopyrightText: 2015-2019 Cadence Design Systems, Inc. - * - * SPDX-License-Identifier: MIT - * - * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD - */ -/* - * Copyright (c) 2015-2019 Cadence Design Systems, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/****************************************************************************** - Xtensa interrupt handling data and assembly routines. - Also see xtensa_intr.c and xtensa_vectors.S. -******************************************************************************/ - -#include -#include - -#include "xtensa_context.h" -#include "FreeRTOSConfig.h" - -#if XCHAL_HAVE_INTERRUPTS - -/* -------------------------------------------------------------------------------- - INTENABLE virtualization information. -------------------------------------------------------------------------------- -*/ - - -#if XT_USE_SWPRI -/* Warning - this is not multicore-compatible. */ - .data - .global _xt_intdata - .align 8 -_xt_intdata: - .global _xt_intenable - .type _xt_intenable,@object - .size _xt_intenable,4 - .global _xt_vpri_mask - .type _xt_vpri_mask,@object - .size _xt_vpri_mask,4 - -_xt_intenable: .word 0 /* Virtual INTENABLE */ -_xt_vpri_mask: .word 0xFFFFFFFF /* Virtual priority mask */ -#endif - -/* -------------------------------------------------------------------------------- - Table of C-callable interrupt handlers for each interrupt. Note that not all - slots can be filled, because interrupts at level > EXCM_LEVEL will not be - dispatched to a C handler by default. - - Stored as: - int 0 cpu 0 - int 0 cpu 1 - ... 
- int 0 cpu n - int 1 cpu 0 - int 1 cpu 1 - etc -------------------------------------------------------------------------------- -*/ - - .data - .global _xt_interrupt_table - .align 8 - -_xt_interrupt_table: - - .set i, 0 - .rept XCHAL_NUM_INTERRUPTS*portNUM_PROCESSORS - .word xt_unhandled_interrupt /* handler address */ - .word i /* handler arg (default: intnum) */ - .set i, i+1 - .endr - -#endif /* XCHAL_HAVE_INTERRUPTS */ - - -#if XCHAL_HAVE_EXCEPTIONS - -/* -------------------------------------------------------------------------------- - Table of C-callable exception handlers for each exception. Note that not all - slots will be active, because some exceptions (e.g. coprocessor exceptions) - are always handled by the OS and cannot be hooked by user handlers. - - Stored as: - exc 0 cpu 0 - exc 0 cpu 1 - ... - exc 0 cpu n - exc 1 cpu 0 - exc 1 cpu 1 - etc -------------------------------------------------------------------------------- -*/ - - .data - .global _xt_exception_table - .align 4 - -_xt_exception_table: - .rept XCHAL_EXCCAUSE_NUM * portNUM_PROCESSORS - .word xt_unhandled_exception /* handler address */ - .endr - -#endif - - -/* -------------------------------------------------------------------------------- - unsigned int xt_ints_on ( unsigned int mask ) - - Enables a set of interrupts. Does not simply set INTENABLE directly, but - computes it as a function of the current virtual priority if XT_USE_SWPRI is - enabled. - Can be called from interrupt handlers. -------------------------------------------------------------------------------- -*/ - - .text - .align 4 - .global xt_ints_on - .type xt_ints_on,@function - -xt_ints_on: - - ENTRY0 - -#if XCHAL_HAVE_INTERRUPTS -#if XT_USE_SWPRI - movi a3, 0 - movi a4, _xt_intdata - xsr a3, INTENABLE /* Disables all interrupts */ - rsync - l32i a3, a4, 0 /* a3 = _xt_intenable */ - l32i a6, a4, 4 /* a6 = _xt_vpri_mask */ - or a5, a3, a2 /* a5 = _xt_intenable | mask */ - s32i a5, a4, 0 /* _xt_intenable |= mask */ - and a5, a5, a6 /* a5 = _xt_intenable & _xt_vpri_mask */ - wsr a5, INTENABLE /* Reenable interrupts */ - mov a2, a3 /* Previous mask */ -#else - movi a3, 0 - xsr a3, INTENABLE /* Disables all interrupts */ - rsync - or a2, a3, a2 /* set bits in mask */ - wsr a2, INTENABLE /* Re-enable ints */ - rsync - mov a2, a3 /* return prev mask */ -#endif -#else - movi a2, 0 /* Return zero */ -#endif - RET0 - - .size xt_ints_on, . - xt_ints_on - - -/* -------------------------------------------------------------------------------- - unsigned int xt_ints_off ( unsigned int mask ) - - Disables a set of interrupts. Does not simply set INTENABLE directly, - but computes it as a function of the current virtual priority if XT_USE_SWPRI is - enabled. - Can be called from interrupt handlers. 
-------------------------------------------------------------------------------- -*/ - - .text - .align 4 - .global xt_ints_off - .type xt_ints_off,@function - -xt_ints_off: - - ENTRY0 -#if XCHAL_HAVE_INTERRUPTS -#if XT_USE_SWPRI - movi a3, 0 - movi a4, _xt_intdata - xsr a3, INTENABLE /* Disables all interrupts */ - rsync - l32i a3, a4, 0 /* a3 = _xt_intenable */ - l32i a6, a4, 4 /* a6 = _xt_vpri_mask */ - or a5, a3, a2 /* a5 = _xt_intenable | mask */ - xor a5, a5, a2 /* a5 = _xt_intenable & ~mask */ - s32i a5, a4, 0 /* _xt_intenable &= ~mask */ - and a5, a5, a6 /* a5 = _xt_intenable & _xt_vpri_mask */ - wsr a5, INTENABLE /* Reenable interrupts */ - mov a2, a3 /* Previous mask */ -#else - movi a4, 0 - xsr a4, INTENABLE /* Disables all interrupts */ - rsync - or a3, a4, a2 /* set bits in mask */ - xor a3, a3, a2 /* invert bits in mask set in mask, essentially clearing them */ - wsr a3, INTENABLE /* Re-enable ints */ - rsync - mov a2, a4 /* return prev mask */ -#endif -#else - movi a2, 0 /* return zero */ -#endif - RET0 - - .size xt_ints_off, . - xt_ints_off - - diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_vector_defaults.S b/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_vector_defaults.S index 39327779e3e..a016e02d924 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_vector_defaults.S +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_vector_defaults.S @@ -29,6 +29,21 @@ The default behaviour is to just exit the interrupt or call the panic handler on .align 4 _xt_debugexception: +#if (CONFIG_ESP32_ECO3_CACHE_LOCK_FIX && CONFIG_BTDM_CTRL_HLI) +#define XT_DEBUGCAUSE_DI (5) + getcoreid a0 +#if (CONFIG_BTDM_CTRL_PINNED_TO_CORE == PRO_CPU_NUM) + beqz a0, 1f +#else + bnez a0, 1f +#endif + + rsr a0, DEBUGCAUSE + extui a0, a0, XT_DEBUGCAUSE_DI, 1 + bnez a0, _xt_debug_di_exc +1: +#endif //(CONFIG_ESP32_ECO3_CACHE_LOCK_FIX && CONFIG_BTDM_CTRL_HLI) + movi a0,PANIC_RSN_DEBUGEXCEPTION wsr a0,EXCCAUSE /* _xt_panic assumes a level 1 exception. As we're @@ -41,6 +56,66 @@ _xt_debugexception: call0 _xt_panic /* does not return */ rfi XCHAL_DEBUGLEVEL +#if (CONFIG_ESP32_ECO3_CACHE_LOCK_FIX && CONFIG_BTDM_CTRL_HLI) + .align 4 +_xt_debug_di_exc: + + /* + The delay time can be calculated by the following formula: + T = ceil(0.25 + max(t1, t2)) us + + t1 = 80 / f1, t2 = (1 + 14/N) * 20 / f2 + + f1: PSRAM access frequency, unit: MHz. + f2: Flash access frequency, unit: MHz. + + When flash is slow/fast read, N = 1. + When flash is DOUT/DIO read, N = 2. + When flash is QOUT/QIO read, N = 4. + + And after testing, when CPU frequency is 240 MHz, it will take 1us to loop 27 times. 
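+
+    As an illustrative check of the constants below (worked out from the formula
+    above, not taken from the original sources): with QIO flash at 80 MHz and
+    80 MHz PSRAM, N = 4, so t1 = 80 / 80 = 1 us and
+    t2 = (1 + 14/4) * 20 / 80 = 1.125 us, giving T = ceil(0.25 + 1.125) = 2 us,
+    i.e. 2 * 27 = 54 loop iterations. With 40 MHz PSRAM instead, t1 = 2 us and
+    T = ceil(0.25 + 2) = 3 us, i.e. 81 iterations.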
+ */ +#if defined(CONFIG_ESPTOOLPY_FLASHMODE_QIO) || defined(CONFIG_ESPTOOLPY_FLASHMODE_QOUT) + +# if defined(CONFIG_ESPTOOLPY_FLASHFREQ_80M) && defined(CONFIG_SPIRAM_SPEED_80M) + movi a0, 54 +# elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_80M) && defined(CONFIG_SPIRAM_SPEED_40M) + movi a0, 81 +# elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_40M) && defined(CONFIG_SPIRAM_SPEED_40M) + movi a0, 81 +# elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_26M) && defined(CONFIG_SPIRAM_SPEED_40M) + movi a0, 108 +# else + movi a0, 135 +# endif + +#elif defined(CONFIG_ESPTOOLPY_FLASHMODE_DIO) || defined(CONFIG_ESPTOOLPY_FLASHMODE_DOUT) + +# if defined(CONFIG_ESPTOOLPY_FLASHFREQ_80M) && defined(CONFIG_SPIRAM_SPEED_80M) + movi a0, 81 +# elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_80M) && defined(CONFIG_SPIRAM_SPEED_40M) + movi a0, 81 +# elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_40M) && defined(CONFIG_SPIRAM_SPEED_40M) + movi a0, 135 +# elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_26M) && defined(CONFIG_SPIRAM_SPEED_40M) + movi a0, 189 +# else + movi a0, 243 +# endif + +#else + movi a0, 243 +#endif + +1: addi a0, a0, -1 /* delay_us(N) */ + .rept 4 + nop + .endr + bnez a0, 1b + + rsr a0, EXCSAVE+XCHAL_DEBUGLEVEL + rfi XCHAL_DEBUGLEVEL +#endif //(CONFIG_ESP32_ECO3_CACHE_LOCK_FIX && CONFIG_BTDM_CTRL_HLI) #endif /* Debug exception */ @@ -158,4 +233,3 @@ _xt_nmi: rfi XCHAL_NMILEVEL #endif /* NMI */ - diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_vectors.S b/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_vectors.S index a431758d43a..3a0a9569d4e 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_vectors.S +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/xtensa_vectors.S @@ -306,9 +306,13 @@ rsil a3, \level - 1 /* lower interrupt level by 1 */ #endif + #ifdef XT_RTOS_TIMER_INT movi a3, XT_TIMER_INTEN /* a3 = timer interrupt bit */ wsr a4, INTCLEAR /* clear sw or edge-triggered interrupt */ beq a3, a4, 7f /* if timer interrupt then skip table */ + #else + wsr a4, INTCLEAR /* clear sw or edge-triggered interrupt */ + #endif // XT_RTOS_TIMER_INT find_ms_setbit a3, a4, a3, 0 /* a3 = interrupt number */ @@ -332,7 +336,7 @@ #else j .L_xt_user_int_&level& /* check for more interrupts */ #endif - + #ifdef XT_RTOS_TIMER_INT 7: .ifeq XT_TIMER_INTPRI - \level @@ -352,6 +356,7 @@ call4 XT_RTOS_TIMER_INT #endif .endif + #endif // XT_RTOS_TIMER_INT #ifdef XT_USE_SWPRI j 8f @@ -377,7 +382,7 @@ .endm - +#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0)) /* -------------------------------------------------------------------------------- Panic handler. @@ -460,14 +465,12 @@ panic_print_hex_ok: s32i a5,a3,0 ret - +#endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */ .section .rodata, "a" .align 4 - - /* -------------------------------------------------------------------------------- Hooks to dynamically install handlers for exceptions and interrupts. @@ -737,11 +740,11 @@ _xt_user_exc: rsr a0, EXCSAVE_1 /* save interruptee's a0 */ s32i a0, sp, XT_STK_A0 - /* Set up PS for C, reenable hi-pri interrupts, and clear EXCM. */ + /* Set up PS for C, reenable debug and NMI interrupts, and clear EXCM. 
*/ #ifdef __XTENSA_CALL0_ABI__ - movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM + movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM #else - movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE + movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM | PS_WOE #endif wsr a0, PS @@ -2062,6 +2065,3 @@ _WindowUnderflow12: .type call_user_start,@function .align 4 .literal_position - - - From ed83560808013f75730447f8c988297408ec40ba Mon Sep 17 00:00:00 2001 From: arshi016 Date: Wed, 12 Oct 2022 00:17:02 -0400 Subject: [PATCH 121/164] Add warning message to ensure min stack size (#575) Co-authored-by: alfred gedeon <28123637+alfred2g@users.noreply.github.com> --- portable/ThirdParty/GCC/Posix/port.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index c18705a4949..bd0842fbf55 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -148,7 +148,12 @@ portSTACK_TYPE * pxPortInitialiseStack( portSTACK_TYPE * pxTopOfStack, thread->xDying = pdFALSE; pthread_attr_init( &xThreadAttributes ); - pthread_attr_setstack( &xThreadAttributes, pxEndOfStack, ulStackSize ); + iRet = pthread_attr_setstack( &xThreadAttributes, pxEndOfStack, ulStackSize ); + if( iRet != 0 ) + { + fprintf( stderr, "[WARN] pthread_attr_setstack failed with return value: %d. Default stack will be used.\n", iRet ); + fprintf( stderr, "[WARN] Increase the stack size to PTHREAD_STACK_MIN.\n" ); + } thread->ev = event_create(); From 39723cbc3b7977fe8d85712721267167c18ccb43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20G=C3=BCrtler?= Date: Thu, 13 Oct 2022 19:22:24 +0200 Subject: [PATCH 122/164] Removed the 'configASSERT( xInheritanceOccurred == pdFALSE )' assertion from xQueueSemaphoreTake as the reasoning behind it is wrong; it can trigger on wrongly on highly-contested semaphores on multicore systems. See https://forums.freertos.org/t/15967 (#576) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Niklas Gürtler --- queue.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/queue.c b/queue.c index cc3e1f574a9..47c6a737d49 100644 --- a/queue.c +++ b/queue.c @@ -1624,15 +1624,6 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, { if( xTicksToWait == ( TickType_t ) 0 ) { - /* For inheritance to have occurred there must have been an - * initial timeout, and an adjusted timeout cannot become 0, as - * if it were 0 the function would have exited. */ - #if ( configUSE_MUTEXES == 1 ) - { - configASSERT( xInheritanceOccurred == pdFALSE ); - } - #endif /* configUSE_MUTEXES */ - /* The semaphore count was 0 and no block time is specified * (or the block time has expired) so exit now. 
*/ taskEXIT_CRITICAL(); From 189ffd62742b8ee82c6e5e4eba6ce07f1b0ad3af Mon Sep 17 00:00:00 2001 From: RichardBarry <3073890+RichardBarry@users.noreply.github.com> Date: Wed, 19 Oct 2022 22:04:53 -0700 Subject: [PATCH 123/164] Update the NIOSII port to enable longer jumps (#578) Update the NIOSII port so it works on systems with more RAM as per https://forums.freertos.org/t/nios-ii-r-nios2-call26-noat-linker-error/16028 --- portable/GCC/NiosII/port_asm.S | 43 +++++++++++++++++----------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/portable/GCC/NiosII/port_asm.S b/portable/GCC/NiosII/port_asm.S index 06cf5d28793..a28677ede36 100644 --- a/portable/GCC/NiosII/port_asm.S +++ b/portable/GCC/NiosII/port_asm.S @@ -27,12 +27,12 @@ */ .extern vTaskSwitchContext - + .set noat # Exported to start the first task. -.globl restore_sp_from_pxCurrentTCB - +.globl restore_sp_from_pxCurrentTCB + # Entry point for exceptions. .section .exceptions.entry.user, "xa" @@ -41,15 +41,15 @@ save_context: addi sp, sp, -116 # Create space on the stack. stw ra, 0(sp) # Leave a gap for muldiv 0 - stw at, 8(sp) + stw at, 8(sp) stw r2, 12(sp) stw r3, 16(sp) stw r4, 20(sp) - stw r5, 24(sp) - stw r6, 28(sp) - stw r7, 32(sp) - stw r8, 36(sp) - stw r9, 40(sp) + stw r5, 24(sp) + stw r6, 28(sp) + stw r7, 32(sp) + stw r8, 36(sp) + stw r9, 40(sp) stw r10, 44(sp) stw r11, 48(sp) stw r12, 52(sp) @@ -76,12 +76,12 @@ save_sp_to_pxCurrentTCB: ldw et, (et) # Load the value of the pxCurrentTCB pointer stw sp, (et) # Store the stack pointer into the top of the TCB - br irq_test_user # skip the section .exceptions.entry + br irq_test_user # skip the section .exceptions.entry - .section .exceptions.irqtest, "xa" + .section .exceptions.irqtest, "xa" irq_test_user: - .section .exceptions.exit.user, "xa" + .section .exceptions.exit.user, "xa" restore_sp_from_pxCurrentTCB: movia et, pxCurrentTCB # Load the address of the pxCurrentTCB pointer ldw et, (et) # Load the value of the pxCurrentTCB pointer @@ -94,11 +94,11 @@ restore_context: ldw r2, 12(sp) ldw r3, 16(sp) ldw r4, 20(sp) - ldw r5, 24(sp) - ldw r6, 28(sp) - ldw r7, 32(sp) - ldw r8, 36(sp) - ldw r9, 40(sp) + ldw r5, 24(sp) + ldw r6, 28(sp) + ldw r7, 32(sp) + ldw r8, 36(sp) + ldw r9, 40(sp) ldw r10, 44(sp) ldw r11, 48(sp) ldw r12, 52(sp) @@ -120,8 +120,8 @@ restore_context: ldw fp, 112(sp) addi sp, sp, 116 # Release stack space - eret # Return to address ea, loading eStatus into Status. - + eret # Return to address ea, loading eStatus into Status. + .section .exceptions.soft, "xa" soft_exceptions: movhi r3, 0x003b /* upper half of trap opcode */ @@ -131,8 +131,9 @@ soft_exceptions: call_scheduler: stw ea, 72(sp) # EA is PC+4 so will skip over instruction causing exception - call vTaskSwitchContext # Pick the next context. - br restore_sp_from_pxCurrentTCB # Switch in the task context and restore. + movia r15, vTaskSwitchContext # Pick the next context - use long call version in place of "call" + callr r15 + br restore_sp_from_pxCurrentTCB # Switch in the task context and restore. .section .exceptions.unknown.user exceptions_unknown_user: From 1ad889946b4978e0c73156dbc0cebb8f42b3de3a Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Fri, 28 Oct 2022 10:41:56 +0530 Subject: [PATCH 124/164] Update Cortex-M55 and Cortex-M85 ports (#579) These were missed when PR #59 was merged. 
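As a rough illustration of the SysTick clocking option these updated ports honour
(the clock rates and tick rate below are placeholder values, not part of this
change), an application whose SysTick is fed by the core clock divided by 8 might
add the following to FreeRTOSConfig.h:

    #define configCPU_CLOCK_HZ        ( 160000000UL )
    #define configSYSTICK_CLOCK_HZ    ( configCPU_CLOCK_HZ / 8 )  /* SysTick CLK bit left clear */
    #define configTICK_RATE_HZ        ( ( TickType_t ) 1000 )
    #define configUSE_TICKLESS_IDLE   1

When configSYSTICK_CLOCK_HZ is defined, the port keeps the SysTick CLK bit clear
and derives the tick reload value (here 20000000 / 1000 - 1 = 19999) from that
rate; when it is left undefined it defaults to configCPU_CLOCK_HZ and the core
clock drives the SysTick.
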
Signed-off-by: Gaurav Aggarwal Signed-off-by: Gaurav Aggarwal --- portable/GCC/ARM_CM55/non_secure/port.c | 460 +++++++++++--------- portable/GCC/ARM_CM55_NTZ/non_secure/port.c | 460 +++++++++++--------- portable/GCC/ARM_CM85/non_secure/port.c | 460 +++++++++++--------- portable/GCC/ARM_CM85_NTZ/non_secure/port.c | 460 +++++++++++--------- portable/IAR/ARM_CM55/non_secure/port.c | 460 +++++++++++--------- portable/IAR/ARM_CM55_NTZ/non_secure/port.c | 460 +++++++++++--------- portable/IAR/ARM_CM85/non_secure/port.c | 460 +++++++++++--------- portable/IAR/ARM_CM85_NTZ/non_secure/port.c | 460 +++++++++++--------- 8 files changed, 2072 insertions(+), 1608 deletions(-) diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/GCC/ARM_CM55/non_secure/port.c +++ b/portable/GCC/ARM_CM55/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. 
*/ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. 
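+ * (Illustrative numbers, assumed for clarity rather than taken from the
+ * original comment: with ulTimerCountsForOneTick = 100000,
+ * xExpectedIdleTime = 5 and 40000 decrements left in the current period,
+ * the reload value is 40000 + 4 * 100000 = 440000, reduced by one tick
+ * period if the tick interrupt was already pending and by
+ * ulStoppedTimerCompensation.)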
*/ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. - * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? 
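* (As an illustrative answer with assumed numbers: 350000 completed
* decrements at 100000 counts per tick means three complete periods
* passed, and the load register is then set 4 * 100000 - 350000 = 50000
* decrements before the next tick.)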
*/ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. */ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. 
*/ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
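When the COUNT_FLAG test above succeeds, the SysTick has already wrapped from ulReloadValue and a fresh tick period is in progress, so the load value written back is whatever remains of one standard period. Below is a small host-side sketch of that calculation and of the guard against tiny or underflowed results; the "Hypothetical" names and the numbers in main() are illustrative only.

    #include <assert.h>
    #include <stdint.h>

    static uint32_t prvHypotheticalLoadAfterTickWake( uint32_t ulTimerCountsForOneTick,
                                                      uint32_t ulStoppedTimerCompensation,
                                                      uint32_t ulReloadValue,
                                                      uint32_t ulCurrentValueRegister )
    {
        /* ( ulReloadValue - current value ) counts of the new tick period have
         * already elapsed, so this much of one standard period is left. */
        uint32_t ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) -
                                         ( ulReloadValue - ulCurrentValueRegister );

        /* Guard against a tiny or underflowed result, for example if the
         * post-sleep hook ran for too long. */
        if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) ||
            ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
        {
            ulCalculatedLoadValue = ulTimerCountsForOneTick - 1UL;
        }

        return ulCalculatedLoadValue;
    }

    int main( void )
    {
        /* 25 000 counts per tick; the counter wrapped from a reload of 105 977
         * and has since counted down to 104 000, i.e. 1 977 counts elapsed. */
        assert( prvHypotheticalLoadAfterTickWake( 25000UL, 23UL, 105977UL, 104000UL ) == 23022UL );
        return 0;
    }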
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
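Returning to the early-wake branch a little above: when an interrupt other than the tick ended the sleep, the time actually slept is rounded down to whole tick periods (the value later handed to vTaskStepTick()) and the remainder of the interrupted period is programmed back into the reload register. A host-side sketch of that rounding, using hypothetical counts in main():

    #include <assert.h>
    #include <stdint.h>

    static void prvHypotheticalEarlyWake( uint32_t ulTimerCountsForOneTick,
                                          uint32_t ulSysTickDecrementsLeft,
                                          uint32_t ulExpectedIdleTicks,
                                          uint32_t * pulCompleteTickPeriods,
                                          uint32_t * pulNewLoadValue )
    {
        /* Counts that actually elapsed out of the whole expected idle window. */
        uint32_t ulCompletedSysTickDecrements = ( ulExpectedIdleTicks * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;

        /* Whole tick periods that elapsed - what vTaskStepTick() is given. */
        *pulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;

        /* Reload value that completes the tick period that was cut short. */
        *pulNewLoadValue = ( ( *pulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
    }

    int main( void )
    {
        uint32_t ulPeriods, ulLoad;

        /* 25 000 counts per tick; woken with 80 000 counts of a 5-tick idle window still to go. */
        prvHypotheticalEarlyWake( 25000UL, 80000UL, 5UL, &ulPeriods, &ulLoad );
        assert( ulPeriods == 1UL );    /* 45 000 / 25 000, rounded down. */
        assert( ulLoad == 5000UL );    /* 50 000 - 45 000. */
        return 0;
    }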
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
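The slots written above mirror the frame the hardware would push on exception entry, followed by the software-saved registers. Purely as an illustration (the port builds the frame with raw pointer writes, not a struct), the hardware-stacked part can be pictured as the following layout, lowest address first:

    #include <stdint.h>

    /* Illustrative only. Below this frame the port additionally stores
     * R4-R11 and EXC_RETURN, the optional CONTROL value in MPU builds,
     * PSPLIM and, in TrustZone builds, the task's xSecureContext slot. */
    typedef struct xHYPOTHETICAL_BASIC_FRAME
    {
        uint32_t ulR0;      /* Loaded with pvParameters. */
        uint32_t ulR1;
        uint32_t ulR2;
        uint32_t ulR3;
        uint32_t ulR12;
        uint32_t ulLR;      /* portTASK_RETURN_ADDRESS, prvTaskExitError by default. */
        uint32_t ulPC;      /* Task entry point, pxCode. */
        uint32_t ulXPSR;    /* portINITIAL_XPSR - only the T (Thumb) bit set. */
    } HypotheticalBasicFrame_t;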
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/GCC/ARM_CM85/non_secure/port.c +++ b/portable/GCC/ARM_CM85/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
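When the SysTick is fed from such a reference clock, the application must tell the kernel its rate. A hypothetical FreeRTOSConfig.h fragment for a part with an 80 MHz core and a 1 MHz SysTick reference (values are examples only; leaving configSYSTICK_CLOCK_HZ undefined keeps the default core-clock behaviour) could read:

    /* FreeRTOSConfig.h (fragment) - hypothetical values. */
    #define configCPU_CLOCK_HZ          ( 80000000UL )
    #define configSYSTICK_CLOCK_HZ      ( 1000000UL )   /* SysTick rate with the CLKSOURCE bit clear. */
    #define configTICK_RATE_HZ          ( 1000UL )
    #define configUSE_TICKLESS_IDLE     1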
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
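The pending-tick handling a few lines above relies on two bits of the Interrupt Control and State Register: PENDSTSET (bit 26) reports a pending SysTick interrupt, and writing PENDSTCLR (bit 25) removes it. A host-side sketch of just that read-then-clear step, with the register modelled as an ordinary variable rather than the real memory-mapped location:

    #include <stdint.h>

    #define portNVIC_PEND_SYSTICK_CLEAR_BIT    ( 1UL << 25UL )
    #define portNVIC_PEND_SYSTICK_SET_BIT      ( 1UL << 26UL )

    /* Stand-in for the real ICSR at 0xe000ed04. */
    static uint32_t ulHypotheticalIntCtrlReg = portNVIC_PEND_SYSTICK_SET_BIT;

    /* Returns 1 when a pending SysTick interrupt was found and cleared. */
    static int prvClearPendingSysTick( void )
    {
        if( ( ulHypotheticalIntCtrlReg & portNVIC_PEND_SYSTICK_SET_BIT ) != 0UL )
        {
            /* On hardware, writing PENDSTCLR clears the pending state; the
             * model simply drops the status bit. */
            ulHypotheticalIntCtrlReg &= ~portNVIC_PEND_SYSTICK_SET_BIT;
            return 1;
        }

        return 0;
    }

    int main( void )
    {
        return ( prvClearPendingSysTick() == 1 ) ? 0 : 1;
    }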
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
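The three constants initialised above follow directly from the clock configuration. For concreteness, a standalone sketch that prints them for hypothetical clock rates - a 100 MHz core, a SysTick fed from a 25 MHz reference and a 1 kHz tick (none of these values come from a specific device):

    #include <stdint.h>
    #include <stdio.h>

    #define portMAX_24_BIT_NUMBER       ( 0xffffffUL )   /* SysTick is a 24-bit down counter. */
    #define portMISSED_COUNTS_FACTOR    ( 94UL )

    int main( void )
    {
        const uint32_t ulCpuClockHz = 100000000UL;      /* Hypothetical. */
        const uint32_t ulSysTickClockHz = 25000000UL;   /* Hypothetical. */
        const uint32_t ulTickRateHz = 1000UL;           /* Hypothetical. */

        uint32_t ulTimerCountsForOneTick = ulSysTickClockHz / ulTickRateHz;
        uint32_t ulMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
        uint32_t ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( ulCpuClockHz / ulSysTickClockHz );

        printf( "counts per tick            : %lu\n", ( unsigned long ) ulTimerCountsForOneTick );            /* 25000 */
        printf( "maximum suppressible ticks : %lu\n", ( unsigned long ) ulMaximumPossibleSuppressedTicks );   /* 671 */
        printf( "stopped timer compensation : %lu\n", ( unsigned long ) ulStoppedTimerCompensation );         /* 23 */
        return 0;
    }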
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
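
To make the seeded frame above concrete: the PC slot receives pxCode (the task's entry function), the R0 slot receives pvParameters, and the LR slot receives portTASK_RETURN_ADDRESS, so the very first context restore enters the task with its argument already in place and routes an accidental return into prvTaskExitError(). A minimal, hypothetical task showing where those two values come from (prvBlinkTask and xPeriod are invented for the example):

    #include "FreeRTOS.h"
    #include "task.h"

    /* Hypothetical task entry - the pvParameters argument arrives through the
     * R0 slot that pxPortInitialiseStack() seeded above. */
    static void prvBlinkTask( void * pvParameters )
    {
        const TickType_t xDelayTicks = *( ( TickType_t * ) pvParameters );

        for( ; ; )
        {
            /* Application work goes here. Tasks must never return - a return
             * would jump through the LR slot to portTASK_RETURN_ADDRESS. */
            vTaskDelay( xDelayTicks );
        }
    }

    void vCreateExampleTask( void )
    {
        static TickType_t xPeriod = 500;

        /* pxCode in pxPortInitialiseStack() is prvBlinkTask, and pvParameters
         * is &xPeriod. */
        ( void ) xTaskCreate( prvBlinkTask, "Blink", configMINIMAL_STACK_SIZE, &xPeriod, tskIDLE_PRIORITY + 1, NULL );
    }
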
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/IAR/ARM_CM55/non_secure/port.c +++ b/portable/IAR/ARM_CM55/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
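
The portNVIC_SYSTICK_CLK_BIT_CONFIG default above only changes behaviour for applications that clock the SysTick from a reference clock rather than the core. Such an application defines configSYSTICK_CLOCK_HZ in FreeRTOSConfig.h so the port leaves the CLK bit clear and scales its tick maths to the reference clock. A sketch with assumed example frequencies (not taken from any particular device):

    /* FreeRTOSConfig.h fragment - example values only. */
    #define configCPU_CLOCK_HZ         ( 100000000UL )         /* 100 MHz core clock (assumed). */
    #define configSYSTICK_CLOCK_HZ     ( 32768UL )              /* 32.768 kHz SysTick reference clock (assumed). */
    #define configTICK_RATE_HZ         ( ( TickType_t ) 100 )   /* 100 Hz tick. */
    #define configUSE_TICKLESS_IDLE    1                        /* Enable vPortSuppressTicksAndSleep(). */

Leaving configSYSTICK_CLOCK_HZ undefined keeps the previous behaviour: the SysTick runs from the core clock and the CLK bit is set.
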
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
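
Worked through with assumed numbers (the names mirror the port, the values are invented): 10000 counts per tick, a five-tick expected idle time, the current-value register read back as zero, and the SysTick interrupt found pending on entry.

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        const uint32_t ulTimerCountsForOneTick = 10000UL;
        const uint32_t xExpectedIdleTime = 5UL;
        const uint32_t ulStoppedTimerCompensation = 94UL;
        uint32_t ulSysTickDecrementsLeft = 0UL; /* Read from the current-value register; 0 means a full tick remains. */
        const int xSysTickIrqPending = 1;       /* The pend-set bit was found set in this example. */
        uint32_t ulReloadValue;

        if( ulSysTickDecrementsLeft == 0UL )
        {
            ulSysTickDecrementsLeft = ulTimerCountsForOneTick; /* 10000 */
        }

        /* Counts until the end of the expected idle period. */
        ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); /* 50000 */

        if( xSysTickIrqPending != 0 )
        {
            /* The pending tick is cleared and suppressed, so one whole tick
             * period comes off the reload value. */
            ulReloadValue -= ulTimerCountsForOneTick; /* 40000 */
        }

        if( ulReloadValue > ulStoppedTimerCompensation )
        {
            ulReloadValue -= ulStoppedTimerCompensation; /* 39906 */
        }

        printf( "reload value = %u\n", ( unsigned ) ulReloadValue );
        return 0;
    }
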
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
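
For reference, the three tickless-idle constants calculated above come out as follows for two assumed clock configurations (illustrative values only, not taken from any particular device):

    #include <stdint.h>
    #include <stdio.h>

    #define portMAX_24_BIT_NUMBER       ( 0xffffffUL )
    #define portMISSED_COUNTS_FACTOR    ( 94UL )

    static void prvShowConstants( uint32_t ulCpuHz, uint32_t ulSysTickHz, uint32_t ulTickRateHz )
    {
        uint32_t ulTimerCountsForOneTick = ulSysTickHz / ulTickRateHz;
        uint32_t xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
        uint32_t ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( ulCpuHz / ulSysTickHz );

        printf( "counts/tick=%u maxSuppressed=%u compensation=%u\n",
                ( unsigned ) ulTimerCountsForOneTick,
                ( unsigned ) xMaximumPossibleSuppressedTicks,
                ( unsigned ) ulStoppedTimerCompensation );
    }

    int main( void )
    {
        /* SysTick on the core clock: 100 MHz, 1 kHz tick.
         * -> 100000 counts/tick, at most 167 suppressible ticks, compensation 94. */
        prvShowConstants( 100000000UL, 100000000UL, 1000UL );

        /* SysTick on a 32.768 kHz reference clock, 100 Hz tick.
         * -> 327 counts/tick, at most 51306 suppressible ticks, compensation 0. */
        prvShowConstants( 100000000UL, 32768UL, 100UL );

        return 0;
    }

With a slow reference clock the compensation term rounds down to zero, which simply means the stopped-timer error is smaller than one SysTick count.
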
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/IAR/ARM_CM85/non_secure/port.c +++ b/portable/IAR/ARM_CM85/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
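For context on the configSYSTICK_CLOCK_HZ override introduced above: when the SysTick is driven by a slower reference clock rather than the core clock, the application defines the symbol in FreeRTOSConfig.h and the port then leaves the CLK bit clear. A minimal sketch with assumed, illustrative clock values (the real values depend entirely on the target hardware):

    /* FreeRTOSConfig.h - illustrative values only. */
    #define configCPU_CLOCK_HZ        ( 150000000UL )  /* Assumed core clock. */
    #define configSYSTICK_CLOCK_HZ    ( 32768UL )      /* Assumed SysTick reference clock when the CLK bit is 0. */
    #define configTICK_RATE_HZ        ( 100 )          /* 100 Hz tick -> ulTimerCountsForOneTick = 327. */

With configSYSTICK_CLOCK_HZ defined this way, portNVIC_SYSTICK_CLK_BIT_CONFIG evaluates to 0, so the control register writes below leave the SysTick counting the reference clock instead of the core clock.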
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
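To make the reload arithmetic above concrete, here is a small worked example with assumed numbers (a core-clocked SysTick at 1 MHz and a 1 kHz tick, so ulTimerCountsForOneTick is 1000):

    /* Assumed: ulTimerCountsForOneTick = 1000, xExpectedIdleTime = 5 ticks,
     * and the stopped SysTick current-value register reads 400. */
    ulSysTickDecrementsLeft = 400;
    ulReloadValue = 400 + ( 1000 * ( 5 - 1 ) );    /* 4400 decrements until wake. */

    /* If the SysTick interrupt is already pending, one tick period is removed
     * (4400 - 1000 = 3400) and the pending bit is cleared, reflecting that the
     * first tick is suppressed and the second tick period is already underway.
     * ulStoppedTimerCompensation is then subtracted if the result is still
     * large enough. */

This is only a numerical illustration of the code that follows, not additional logic.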
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
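For orientation, the tickless-idle constants computed above work out as follows under one assumed clocking scheme (core-clocked SysTick at 100 MHz with a 1 kHz tick; the numbers are illustrative only):

    /* Assumed: configCPU_CLOCK_HZ = configSYSTICK_CLOCK_HZ = 100000000UL,
     * configTICK_RATE_HZ = 1000, portMAX_24_BIT_NUMBER = 0xffffffUL. */
    ulTimerCountsForOneTick         = 100000000UL / 1000UL;    /* 100000 counts per tick. */
    xMaximumPossibleSuppressedTicks = 0xffffffUL / 100000UL;   /* 167 ticks fit in the 24-bit counter. */
    ulStoppedTimerCompensation      = 94UL / ( 100000000UL / 100000000UL ); /* 94 counts. */

so a single call to vPortSuppressTicksAndSleep() can suppress at most roughly 167 tick interrupts in this configuration.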
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c index d746923dfee..9976daee49a 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c @@ -78,20 +78,13 @@ #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portMIN_INTERRUPT_PRIORITY ( 255UL ) #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) -#ifndef configSYSTICK_CLOCK_HZ - #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ - /* Ensure the SysTick is clocked at the same frequency as the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) -#else - -/* The way the SysTick is clocked is not modified in case it is not the - * same a the core. */ - #define portNVIC_SYSTICK_CLK_BIT ( 0 ) -#endif /*-----------------------------------------------------------*/ /** @@ -199,7 +192,7 @@ * have occurred while the SysTick counter is stopped during tickless idle * calculations. */ -#define portMISSED_COUNTS_FACTOR ( 45UL ) +#define portMISSED_COUNTS_FACTOR ( 94UL ) /*-----------------------------------------------------------*/ /** @@ -258,6 +251,20 @@ #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. 
*/ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + /** * @brief Let the user override the pre-loading of the initial LR with the * address of prvTaskExitError() in case it messes up unwinding of the stack @@ -386,7 +393,7 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; #if ( configUSE_TICKLESS_IDLE == 1 ) __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) { - uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; TickType_t xModifiableIdleTime; /* Make sure the SysTick reload value does not overflow the counter. */ @@ -395,22 +402,6 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; xExpectedIdleTime = xMaximumPossibleSuppressedTicks; } - /* Stop the SysTick momentarily. The time the SysTick is stopped for is - * accounted for as best it can be, but using the tickless mode will - * inevitably result in some tiny drift of the time maintained by the - * kernel with respect to calendar time. */ - portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; - - /* Calculate the reload value required to wait xExpectedIdleTime - * tick periods. -1 is used because this code will execute part way - * through one of the tick periods. */ - ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); - - if( ulReloadValue > ulStoppedTimerCompensation ) - { - ulReloadValue -= ulStoppedTimerCompensation; - } - /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ __asm volatile ( "cpsid i" ::: "memory" ); @@ -418,26 +409,52 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; __asm volatile ( "isb" ); /* If a context switch is pending or a task is waiting for the scheduler - * to be un-suspended then abandon the low power entry. */ + * to be unsuspended then abandon the low power entry. */ if( eTaskConfirmSleepModeStatus() == eAbortSleep ) { - /* Restart from whatever is left in the count register to complete - * this tick period. */ - portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; - - /* Restart SysTick. */ - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - - /* Reset the reload register to the value required for normal tick - * periods. */ - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; - - /* Re-enable interrupts - see comments above the cpsid instruction() + /* Re-enable interrupts - see comments above the cpsid instruction * above. */ __asm volatile ( "cpsie i" ::: "memory" ); } else { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. 
*/ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + /* Set the new reload value. */ portNVIC_SYSTICK_LOAD_REG = ulReloadValue; @@ -448,12 +465,11 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; /* Restart SysTick. */ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; - /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can - * set its parameter to 0 to indicate that its implementation - * contains its own wait for interrupt or wait for event - * instruction, and so wfi should not be executed again. However, - * the original expected idle time variable must remain unmodified, - * so a copy is taken. */ + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ xModifiableIdleTime = xExpectedIdleTime; configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); @@ -467,48 +483,44 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); /* Re-enable interrupts to allow the interrupt that brought the MCU - * out of sleep mode to execute immediately. See comments above + * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ __asm volatile ( "cpsie i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable interrupts again because the clock is about to be stopped - * and interrupts that execute while the clock is stopped will - * increase any slippage between the time maintained by the RTOS and - * calendar time. */ + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ __asm volatile ( "cpsid i" ::: "memory" ); __asm volatile ( "dsb" ); __asm volatile ( "isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the - * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
- * Again, the time the SysTick is stopped for is accounted for as - * best it can be, but using the tickless mode will inevitably - * result in some tiny drift of the time maintained by the kernel - * with respect to calendar time*/ - portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); - - /* Determine if the SysTick clock has already counted to zero and - * been set back to the current reload value (the reload back being - * correct for the entire expected idle time) or if the SysTick is - * yet to count to zero (in which case an interrupt other than the - * SysTick must have brought the system out of sleep mode). */ + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) { uint32_t ulCalculatedLoadValue; - /* The tick interrupt is already pending, and the SysTick count - * reloaded with ulReloadValue. Reset the - * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick - * period. */ + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); /* Don't allow a tiny value, or values that have somehow * underflowed because the post sleep hook did something - * that took too long. */ - if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) { ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); } @@ -516,17 +528,36 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; /* As the pending tick will be processed as soon as this - * function exits, the tick value maintained by the tick is - * stepped forward by one less than the time spent waiting. */ + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ ulCompleteTickPeriods = xExpectedIdleTime - 1UL; } else { - /* Something other than the tick interrupt ended the sleep. - * Work out how long the sleep lasted rounded to complete tick + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. 
*/ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick * periods (not the ulReload value which accounted for part * ticks). */ - ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; /* How many complete tick periods passed while the processor * was waiting? */ @@ -537,13 +568,39 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; } - /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG - * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard - * value. */ + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; - portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ vTaskStepTick( ulCompleteTickPeriods ); - portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; /* Exit with interrupts enabled. */ __asm volatile ( "cpsie i" ::: "memory" ); @@ -556,11 +613,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU { /* Calculate the constants required to configure the tick interrupt. */ #if ( configUSE_TICKLESS_IDLE == 1 ) - { - ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); - xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; - ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); - } + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } #endif /* configUSE_TICKLESS_IDLE */ /* Stop and reset the SysTick. 
*/ @@ -569,7 +626,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FU /* Configure SysTick to interrupt at the requested rate. */ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; - portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; } /*-----------------------------------------------------------*/ @@ -694,10 +751,10 @@ static void prvTaskExitError( void ) static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ { #if ( configENABLE_TRUSTZONE == 1 ) - { - /* Enable non-secure access to the FPU. */ - SecureInit_EnableNSFPUAccess(); - } + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } #endif /* configENABLE_TRUSTZONE */ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and @@ -810,22 +867,22 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO ulR0 = pulCallerStackAddress[ 0 ]; #if ( configENABLE_MPU == 1 ) - { - /* Read the CONTROL register value. */ - __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); - /* The task that raised the SVC is privileged if Bit[0] - * in the CONTROL register is 0. */ - ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); - } + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } #else /* if ( configENABLE_MPU == 1 ) */ - { - /* Allocate and load a context for the secure task. */ - xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); - } + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } #endif /* configENABLE_MPU */ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); @@ -833,6 +890,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO break; case portSVC_FREE_SECURE_CONTEXT: + /* R0 contains TCB being freed and R1 contains the secure * context handle to be freed. */ ulR0 = pulCallerStackAddress[ 0 ]; @@ -845,21 +903,21 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO case portSVC_START_SCHEDULER: #if ( configENABLE_TRUSTZONE == 1 ) - { - /* De-prioritize the non-secure exceptions so that the - * non-secure pendSV runs at the lowest priority. */ - SecureInit_DePrioritizeNSExceptions(); + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); - /* Initialize the secure context management system. */ - SecureContext_Init(); - } + /* Initialize the secure context management system. */ + SecureContext_Init(); + } #endif /* configENABLE_TRUSTZONE */ #if ( configENABLE_FPU == 1 ) - { - /* Setup the Floating Point Unit (FPU). */ - prvSetupFPU(); - } + { + /* Setup the Floating Point Unit (FPU). 
*/ + prvSetupFPU(); + } #endif /* configENABLE_FPU */ /* Setup the context of the first task so that the first task starts @@ -904,105 +962,105 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Simulate the stack frame as it would be created by a context switch * interrupt. */ #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. 
*/ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + #if ( configENABLE_TRUSTZONE == 1 ) + { pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ - - #if ( configENABLE_TRUSTZONE == 1 ) - { - pxTopOfStack--; - *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ - } - #endif /* configENABLE_TRUSTZONE */ + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ } + #endif /* configENABLE_TRUSTZONE */ + } #endif /* portPRELOAD_REGISTERS */ return pxTopOfStack; @@ -1016,10 +1074,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; #if ( configENABLE_MPU == 1 ) - { - /* Setup the Memory Protection Unit (MPU). */ - prvSetupMPU(); - } + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } #endif /* configENABLE_MPU */ /* Start the timer that generates the tick ISR. Interrupts are disabled From f209c6efa5cf2a3b78a12fd8081789135ad78af5 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Tue, 8 Nov 2022 14:05:35 +0530 Subject: [PATCH 125/164] Fix context switch when time slicing is off (#568) * Fix context switch when time slicing is off When time slicing is off, context switch should only happen when a task with priority higher than the currently executing one is unblocked. Earlier the code was invoking a context switch even when a task with priority equal the currently executing task was unblocked. This commit fixes the code to only do a context switch when a higher priority task is unblocked. Signed-off-by: Gaurav Aggarwal --- tasks.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tasks.c b/tasks.c index 4f25f344be8..027d64844fa 100644 --- a/tasks.c +++ b/tasks.c @@ -4090,10 +4090,14 @@ BaseType_t xTaskIncrementTick( void ) #if ( configNUM_CORES == 1 ) { /* Preemption is on, but a context switch should - * only be performed if the unblocked task has a - * priority that is equal to or higher than the - * currently executing task. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + * only be performed if the unblocked task's + * priority is higher than the currently executing + * task. + * The case of equal priority tasks sharing + * processing time (which happens when both + * preemption and time slicing are on) is + * handled below.*/ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { xSwitchRequired = pdTRUE; } From 15a7e2daae33d93c32c8d67263b91601bdbc82e7 Mon Sep 17 00:00:00 2001 From: Ching-Hsin Lee Date: Thu, 10 Nov 2022 10:32:28 +0800 Subject: [PATCH 126/164] Merge commit "Add support for retrieving a task's uxCoreAffinityMask with the vTaskGetInfo() API" * Merge commit 8128208bdee1f997f83cae631b861f36aeea9b1f --- include/task.h | 3 +++ tasks.c | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/include/task.h b/include/task.h index 4390fb894ee..b97dac3dca9 100644 --- a/include/task.h +++ b/include/task.h @@ -166,6 +166,9 @@ typedef struct xTASK_STATUS StackType_t * pxEndOfStack; /* Points to the end address of the task's stack area. */ #endif configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. 
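The time-slicing fix in the patch above is most visible in configurations like the following sketch (the symbol values are illustrative):

    /* FreeRTOSConfig.h - preemption without time slicing. */
    #define configUSE_PREEMPTION      1
    #define configUSE_TIME_SLICING    0

With these settings a tick that unblocks a task of equal priority no longer requests a context switch; only a strictly higher priority task preempts the running task. Round-robin sharing between equal-priority tasks still requires configUSE_TIME_SLICING set to 1.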
*/ + #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) ) + UBaseType_t uxCoreAffinityMask; /* The core affinity mask for the task */ + #endif } TaskStatus_t; /* Possible return values for eTaskConfirmSleepModeStatus(). */ diff --git a/tasks.c b/tasks.c index 027d64844fa..6363d6cd507 100644 --- a/tasks.c +++ b/tasks.c @@ -5337,6 +5337,12 @@ static void prvCheckTasksWaitingTermination( void ) #endif pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) ) + { + pxTaskStatus->uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; + } + #endif + #if ( configUSE_MUTEXES == 1 ) { pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; From 82b4cc2d1cd5943230d62308e6fc744d70604a0e Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 15 Nov 2022 13:56:43 +0800 Subject: [PATCH 127/164] Use taskENTER/EXIT_CRITICAL_FROM_ISR (#38) * Enter critical section from is implemented differently for single core and smp. Use taskENTER/EXIT_CRITICAL_FROM_ISR in source. --- .github/lexicon.txt | 3 +++ event_groups.c | 4 +-- include/FreeRTOS.h | 16 ++++++++++++ include/task.h | 22 +++++++++++----- .../ThirdParty/GCC/RP2040/include/portmacro.h | 19 ++++++-------- portable/ThirdParty/GCC/RP2040/port.c | 4 +-- queue.c | 16 ++++++------ stream_buffer.c | 16 ++++++------ tasks.c | 26 +++++++++---------- 9 files changed, 75 insertions(+), 51 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index b815e2faca5..b1cabaf4fd9 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1532,6 +1532,7 @@ prvinitialisenewtimer prvinsertblockintofreelist prvlockqueue prvnotifyqueuesetcontainer +prvminimalidletask prvportmalloc prvportresetpic prvprocesssimulatedinterrupts @@ -2455,6 +2456,7 @@ uxsemaphoregetcount uxsemaphoregetcountfromisr uxstate uxstreambuffernumber +uxtaskattributes uxtaskgetnumberoftasks uxtaskgetstackhighwatermark uxtaskgetsystemstate @@ -2826,6 +2828,7 @@ xlist xlistend xlowestpriority xlowestprioritycore +xlowestprioritytopreempt xmair xmaxcount xmaxexpirycountbeforestopping diff --git a/event_groups.c b/event_groups.c index 4d40f5aec83..c9dd6839fae 100644 --- a/event_groups.c +++ b/event_groups.c @@ -536,11 +536,11 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { uxReturn = pxEventBits->uxEventBits; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 55eaf976cbe..cd113862347 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -446,6 +446,22 @@ #endif /* portCHECK_IF_IN_ISR */ +#ifndef portENTER_CRITICAL_FROM_ISR + + #if ( configNUM_CORES > 1 ) + #error portENTER_CRITICAL_FROM_ISR is required in SMP + #endif + +#endif + +#ifndef portEXIT_CRITICAL_FROM_ISR + + #if ( configNUM_CORES > 1 ) + #error portEXIT_CRITICAL_FROM_ISR is required in SMP + #endif + +#endif + /* The timers module relies on xTaskGetSchedulerState(). 
*/ #if configUSE_TIMERS == 1 diff --git a/include/task.h b/include/task.h index b97dac3dca9..ddf0bd87505 100644 --- a/include/task.h +++ b/include/task.h @@ -203,7 +203,7 @@ typedef enum * \defgroup taskYIELD taskYIELD * \ingroup SchedulerControl */ -#define taskYIELD() portYIELD() +#define taskYIELD() portYIELD() /** * task. h @@ -217,8 +217,12 @@ typedef enum * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL * \ingroup SchedulerControl */ -#define taskENTER_CRITICAL() portENTER_CRITICAL() -#define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() +#define taskENTER_CRITICAL() portENTER_CRITICAL() +#if ( configNUM_CORES == 1 ) + #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() +#else + #define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR() +#endif /** * task. h @@ -232,8 +236,12 @@ typedef enum * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL * \ingroup SchedulerControl */ -#define taskEXIT_CRITICAL() portEXIT_CRITICAL() -#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#if ( configNUM_CORES == 1 ) + #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +#else + #define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x ) +#endif /** * task. h @@ -243,7 +251,7 @@ typedef enum * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS * \ingroup SchedulerControl */ -#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() +#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() /** * task. h @@ -253,7 +261,7 @@ typedef enum * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS * \ingroup SchedulerControl */ -#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() +#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() /* Definitions returned by xTaskGetSchedulerState(). 
taskSCHEDULER_SUSPENDED is * 0 to generate more optimal code when configASSERT() is defined as the constant diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index 7239e05f83d..4a5ef24d8ea 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -150,17 +150,10 @@ #define portCLEAR_INTERRUPT_MASK(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : ) - #if configNUM_CORES == 1 - extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); - extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) - #else - extern UBaseType_t vTaskEnterCriticalFromISR( void ); - extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); - #define portSET_INTERRUPT_MASK_FROM_ISR() vTaskEnterCriticalFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vTaskExitCriticalFromISR( x ) - #endif + extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); + extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) @@ -175,8 +168,12 @@ #else extern void vTaskEnterCritical( void ); extern void vTaskExitCritical( void ); + extern UBaseType_t vTaskEnterCriticalFromISR( void ); + extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); #define portENTER_CRITICAL() vTaskEnterCritical() #define portEXIT_CRITICAL() vTaskExitCritical() + #define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR() + #define portEXIT_CRITICAL_FROM_ISR( x ) vTaskExitCriticalFromISR( x ) #endif #define portRTOS_SPINLOCK_COUNT 2 diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index d93042f796a..22c18ecec58 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -715,7 +715,7 @@ void xPortSysTickHandler( void ) { uint32_t ulPreviousMask; - ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + ulPreviousMask = taskENTER_CRITICAL_FROM_ISR(); { /* Increment the RTOS tick. */ if( xTaskIncrementTick() != pdFALSE ) @@ -724,7 +724,7 @@ void xPortSysTickHandler( void ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); + taskEXIT_CRITICAL_FROM_ISR( ulPreviousMask ); } /*-----------------------------------------------------------*/ diff --git a/queue.c b/queue.c index 47c6a737d49..16649527041 100644 --- a/queue.c +++ b/queue.c @@ -1090,7 +1090,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). 
*/ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { @@ -1215,7 +1215,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1261,7 +1261,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1381,7 +1381,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1942,7 +1942,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -2002,7 +2002,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -2036,7 +2036,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { /* Cannot block in an ISR, so check there is data available. 
*/ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2057,7 +2057,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } diff --git a/stream_buffer.c b/stream_buffer.c index b81f072fb1a..4c80bfd742e 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -98,7 +98,7 @@ { \ UBaseType_t uxSavedInterruptStatus; \ \ - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -109,7 +109,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ } #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ @@ -175,7 +175,7 @@ { \ UBaseType_t uxSavedInterruptStatus; \ \ - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -186,7 +186,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ } #endif /* sbSEND_COMPLETE_FROM_ISR */ @@ -1192,7 +1192,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { @@ -1208,7 +1208,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1223,7 +1223,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { @@ -1239,7 +1239,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } diff --git a/tasks.c b/tasks.c index 175084f0d2b..74bf3101dd3 100644 --- a/tasks.c +++ b/tasks.c @@ -374,7 +374,7 @@ typedef tskTCB TCB_t; #if ( configNUM_CORES == 1 ) portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; #else - portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; +portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; #define pxCurrentTCB xTaskGetCurrentTaskHandle() #endif @@ -2281,14 +2281,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptState = taskENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the 
priority of the calling * task that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptState ); return uxReturn; } @@ -2947,7 +2947,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -3003,7 +3003,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xYieldRequired; } @@ -4301,11 +4301,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -6075,7 +6075,7 @@ static void prvResetNextTaskUnblockTime( void ) if( xSchedulerRunning != pdFALSE ) { - uxSavedInterruptStatus = portSET_INTERRUPT_MASK(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); if( pxCurrentTCB->uxCriticalNesting == 0U ) { @@ -6182,7 +6182,7 @@ static void prvResetNextTaskUnblockTime( void ) xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; portRELEASE_ISR_LOCK(); - portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); /* When a task yields in a critical section it just sets * xYieldPending to true. So now that we have exited the @@ -6843,7 +6843,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( pulPreviousNotificationValue != NULL ) { @@ -6957,7 +6957,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUM_CORES == 1 ) */ } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -6998,7 +6998,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -7068,7 +7068,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUM_CORES == 1 ) */ } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); } #endif /* configUSE_TASK_NOTIFICATIONS */ From f257f6d06cbcf1c2e9da951facdf8cdde017521e Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 24 Nov 2022 00:03:26 +0800 Subject: [PATCH 128/164] Improve single core unit test coverage (#42) * prvCreateIldeTask use configNUM_CORES * First time yield in idle task in SMP only * prvCheckTasksWaitingTermination check pxTCB NULL pointer for SMP only. 
Single core won't have to check the pxTCB --- tasks.c | 215 ++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 132 insertions(+), 83 deletions(-) diff --git a/tasks.c b/tasks.c index 74bf3101dd3..b2ae1be3a92 100644 --- a/tasks.c +++ b/tasks.c @@ -3014,31 +3014,59 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, static BaseType_t prvCreateIdleTasks( void ) { BaseType_t xReturn = pdPASS; - BaseType_t xCoreID; - char cIdleName[ configMAX_TASK_NAME_LEN ]; - /* Add each idle task at the lowest priority. */ - for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) + #if ( configNUM_CORES == 1 ) { - BaseType_t x; - - if( xReturn == pdFAIL ) + /* Add the idle task at the lowest priority. */ + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) { - break; + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandles[ 0 ] = xTaskCreateStatic( prvIdleTask, + configIDLE_TASK_NAME, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + + if( xIdleTaskHandles[ 0 ] != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } } - else + #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + configIDLE_TASK_NAME, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandles[ 0 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } + #else + { + BaseType_t xCoreID; + char cIdleName[ configMAX_TASK_NAME_LEN ]; - for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + /* Add each idle task at the lowest priority. */ + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) { - cIdleName[ x ] = configIDLE_TASK_NAME[ x ]; + BaseType_t x; - /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than - * configMAX_TASK_NAME_LEN characters just in case the memory after the - * string is not accessible (extremely unlikely). */ - if( cIdleName[ x ] == ( char ) 0x00 ) + if( xReturn == pdFAIL ) { break; } @@ -3046,11 +3074,25 @@ static BaseType_t prvCreateIdleTasks( void ) { mtCOVERAGE_TEST_MARKER(); } - } - /* Append the idle task number to the end of the name if there is space. */ - #if ( configNUM_CORES > 1 ) - { + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + cIdleName[ x ] = configIDLE_TASK_NAME[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + * configMAX_TASK_NAME_LEN characters just in case the memory after the + * string is not accessible (extremely unlikely). 
*/ + if( cIdleName[ x ] == ( char ) 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Append the idle task number to the end of the name if there is space. */ if( x < configMAX_TASK_NAME_LEN ) { cIdleName[ x++ ] = ( char ) xCoreID + '0'; @@ -3069,30 +3111,26 @@ static BaseType_t prvCreateIdleTasks( void ) { mtCOVERAGE_TEST_MARKER(); } - } - #endif /* #if ( configNUM_CORES > 1 ) */ - #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) - { - if( xCoreID == 0 ) + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) { - StaticTask_t * pxIdleTaskTCBBuffer = NULL; - StackType_t * pxIdleTaskStackBuffer = NULL; - uint32_t ulIdleTaskStackSize; - - /* The Idle task is created using user provided RAM - obtain the - * address of the RAM then create the idle task. */ - vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); - xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvIdleTask, - cIdleName, - ulIdleTaskStackSize, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - pxIdleTaskStackBuffer, - pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } + if( xCoreID == 0 ) + { + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; - #if ( configNUM_CORES > 1 ) + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvIdleTask, + cIdleName, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } else { static StaticTask_t xIdleTCBBuffers[ configNUM_CORES - 1 ]; @@ -3106,31 +3144,28 @@ static BaseType_t prvCreateIdleTasks( void ) xIdleTaskStackBuffers[ xCoreID - 1 ], &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } - #endif /* #if ( configNUM_CORES > 1 ) */ - if( xIdleTaskHandles[ xCoreID ] != NULL ) - { - xReturn = pdPASS; - } - else - { - xReturn = pdFAIL; + if( xIdleTaskHandles[ xCoreID ] != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } } - } - #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ - { - if( xCoreID == 0 ) + #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ { - /* The Idle task is being created using dynamically allocated RAM. */ - xReturn = xTaskCreate( prvIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - - #if ( configNUM_CORES > 1 ) + if( xCoreID == 0 ) + { + /* The Idle task is being created using dynamically allocated RAM. 
*/ + xReturn = xTaskCreate( prvIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } else { xReturn = xTaskCreate( prvMinimalIdleTask, @@ -3140,10 +3175,11 @@ static BaseType_t prvCreateIdleTasks( void ) portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } - #endif /* #if ( configNUM_CORES > 1 ) */ + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ } - #endif /* configSUPPORT_STATIC_ALLOCATION */ } + #endif return xReturn; } @@ -4984,9 +5020,13 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) * any. */ portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ); - /* All cores start up in the idle task. This initial yield gets the application - * tasks started. */ - taskYIELD(); + #if ( configNUM_CORES > 1 ) + { + /* SMP all cores start up in the idle task. This initial yield gets the application + * tasks started. */ + taskYIELD(); + } + #endif /* #if ( configNUM_CORES > 1 ) */ for( ; ; ) { @@ -5260,22 +5300,32 @@ static void prvCheckTasksWaitingTermination( void ) #if ( INCLUDE_vTaskDelete == 1 ) { - TCB_t * pxTCB = NULL; + TCB_t * pxTCB; /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL() * being called too often in the idle task. */ while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) { - taskENTER_CRITICAL(); + #if ( configNUM_CORES == 1 ) { - #if ( configNUM_CORES == 1 ) + taskENTER_CRITICAL(); { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - --uxCurrentNumberOfTasks; - --uxDeletedTasksWaitingCleanUp; + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } } - #else /* #if( configNUM_CORES == 1 ) */ + taskEXIT_CRITICAL(); + + prvDeleteTCB( pxTCB ); + } + #else /* #if( configNUM_CORES == 1 ) */ + { + pxTCB = NULL; + + taskENTER_CRITICAL(); { /* For SMP, multiple idles can be running simultaneously * and we need to check that other idles did not cleanup while we were @@ -5300,15 +5350,14 @@ static void prvCheckTasksWaitingTermination( void ) } } } - #endif /* #if( configNUM_CORES == 1 ) */ - } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL(); - if( pxTCB != NULL ) - { - prvDeleteTCB( pxTCB ); - pxTCB = NULL; + if( pxTCB != NULL ) + { + prvDeleteTCB( pxTCB ); + } } + #endif /* #if( configNUM_CORES == 1 ) */ } } #endif /* INCLUDE_vTaskDelete */ From 9090fcc1ace1328c9cfdf608690e7f483759f3f3 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 24 Nov 2022 13:05:13 +0800 Subject: [PATCH 129/164] Yield for task when core affinity changed (#41) * Yield for task when the task is linked to new allowed cores Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal --- tasks.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tasks.c b/tasks.c index b2ae1be3a92..38951dd98f7 100644 --- a/tasks.c +++ b/tasks.c @@ -2521,11 +2521,17 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { TCB_t * pxTCB; BaseType_t xCoreID; + UBaseType_t uxPrevCoreAffinityMask; + + #if ( configUSE_PREEMPTION == 1 ) + UBaseType_t uxPrevNotAllowedCores; + #endif taskENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); + uxPrevCoreAffinityMask = pxTCB->uxCoreAffinityMask; pxTCB->uxCoreAffinityMask = uxCoreAffinityMask; if( xSchedulerRunning != pdFALSE ) @@ -2534,11 +2540,35 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + /* If the task can no longer run on the core it was running, + * request the core to yield. */ if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) == 0 ) { prvYieldCore( xCoreID ); } } + else + { + #if ( configUSE_PREEMPTION == 1 ) + { + /* Calculate the cores on which this task was not allowed to + * run previously. */ + uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1 << configNUM_CORES ) - 1 ); + + /* Does the new core mask enables this task to run on any of the + * previously not allowed cores? If yes, check if this task can be + * scheduled on any of those cores. 
*/ + if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U ) + { + prvYieldForTask( pxTCB, pdTRUE ); + } + } + #else /* #if( configUSE_PREEMPTION == 1 ) */ + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* #if( configUSE_PREEMPTION == 1 ) */ + } } } taskEXIT_CRITICAL(); From c9ba84d0cea8a069970ed54b9e2f1ac4083174bf Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 24 Nov 2022 13:06:53 +0800 Subject: [PATCH 130/164] Remove builtin clz in prvSelectHighestPriorityTask (#37) * Remove builtin clz in prvSelectHighestPriorityTask --- tasks.c | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/tasks.c b/tasks.c index 38951dd98f7..d2d9c6a6753 100644 --- a/tasks.c +++ b/tasks.c @@ -1028,6 +1028,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; BaseType_t xLowestPriority = pxPreviousTCB->uxPriority; BaseType_t xLowestPriorityCore = -1; + BaseType_t x; if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) { @@ -1050,33 +1051,33 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); - while( uxCoreMap != 0 ) + for( x = ( configNUM_CORES - 1 ); x >= 0; x-- ) { - uint32_t uxCore; + UBaseType_t uxCore = ( UBaseType_t ) x; BaseType_t xTaskPriority; - uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); - configASSERT( taskVALID_CORE_ID( uxCore ) ); - - xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; - - if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + if( ( uxCoreMap & ( 1 << uxCore ) ) != 0 ) { - xTaskPriority = xTaskPriority - ( BaseType_t ) 1; - } + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; + + if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + { + xTaskPriority = xTaskPriority - ( BaseType_t ) 1; + } - uxCoreMap &= ~( 1 << uxCore ); + uxCoreMap &= ~( 1 << uxCore ); - if( ( xTaskPriority < xLowestPriority ) && - ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && - ( xYieldPendings[ uxCore ] == pdFALSE ) ) - { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) - #endif + if( ( xTaskPriority < xLowestPriority ) && + ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && + ( xYieldPendings[ uxCore ] == pdFALSE ) ) { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = uxCore; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = uxCore; + } } } } From 31a7e853c523d3ed2313e0d6aed38be01245aaf0 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 29 Nov 2022 17:20:02 +0800 Subject: [PATCH 131/164] Move critical nesting count to port (#47) * Move the critical nesting management to port layer Signed-off-by: Gaurav Aggarwal * Move critical nesting in TCB macro to tasks.c * Add RP2040 support maintain critical nesting count in TCB * Fix formatting * RP2040 maintain critical nesting count in port * Fix constant type Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal --- include/FreeRTOS.h | 6 - .../ThirdParty/GCC/RP2040/include/portmacro.h | 103 ++++---- portable/ThirdParty/GCC/RP2040/port.c | 38 +-- tasks.c | 237 +++++++++++------- 
4 files changed, 223 insertions(+), 161 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index cd113862347..5a5d430ccce 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -356,12 +356,6 @@ #define configRUN_MULTIPLE_PRIORITIES 0 #endif -#if ( configNUM_CORES > 1 ) - #if portCRITICAL_NESTING_IN_TCB == 0 - #error portCRITICAL_NESTING_IN_TCB is required in SMP - #endif -#endif - #ifndef portGET_CORE_ID #if ( configNUM_CORES == 1 ) diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index 4a5ef24d8ea..8f46a25d5bc 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -129,15 +129,24 @@ #define portGET_CORE_ID() 0 #endif - #define portCHECK_IF_IN_ISR() ({ \ - uint32_t ulIPSR; \ - __asm volatile ("mrs %0, IPSR" : "=r" (ulIPSR)::); \ - ((uint8_t)ulIPSR)>0;}) - - void vYieldCore(int xCoreID); - #define portYIELD_CORE(a) vYieldCore(a) - #define portRESTORE_INTERRUPTS(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : ) - + #define portCHECK_IF_IN_ISR() ({ \ + uint32_t ulIPSR; \ + __asm volatile ("mrs %0, IPSR" : "=r" (ulIPSR)::); \ + ((uint8_t)ulIPSR)>0;}) + + void vYieldCore(int xCoreID); + #define portYIELD_CORE(a) vYieldCore(a) + #define portRESTORE_INTERRUPTS(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : ) + +/*-----------------------------------------------------------*/ + +/* Critical nesting count management. */ + extern UBaseType_t uxCriticalNestings[ configNUM_CORES ]; + #define portGET_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ] ) + #define portSET_CRITICAL_NESTING_COUNT( x ) ( uxCriticalNestings[ portGET_CORE_ID() ] = ( x ) ) + #define portINCREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]++ ) + #define portDECREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]-- ) + /*-----------------------------------------------------------*/ /* Critical section management. */ @@ -176,44 +185,44 @@ #define portEXIT_CRITICAL_FROM_ISR( x ) vTaskExitCriticalFromISR( x ) #endif - #define portRTOS_SPINLOCK_COUNT 2 - - /* Note this is a single method with uxAcquire parameter since we have - * static vars, the method is always called with a compile time constant for - * uxAcquire, and the compiler should dothe right thing! 
*/ - static inline void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) { - static uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ]; - static uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ]; - configASSERT(ulLockNum >= 0 && ulLockNum < portRTOS_SPINLOCK_COUNT ); - uint32_t ulCoreNum = get_core_num(); - uint32_t ulLockBit = 1u << ulLockNum; - configASSERT(ulLockBit < 256u ); - if( uxAcquire ) - { - if( __builtin_expect( !*pxSpinLock, 0 ) ) - { - if( ucOwnedByCore[ulCoreNum] & ulLockBit ) - { - configASSERT(ucRecursionCountByLock[ulLockNum] != 255u ); - ucRecursionCountByLock[ulLockNum]++; - return; - } - while ( __builtin_expect( !*pxSpinLock, 0 ) ); - } - __mem_fence_acquire(); - configASSERT(ucRecursionCountByLock[ulLockNum] == 0 ); - ucRecursionCountByLock[ulLockNum] = 1; - ucOwnedByCore[ulCoreNum] |= ulLockBit; - } else { - configASSERT((ucOwnedByCore[ulCoreNum] & ulLockBit) != 0 ); - configASSERT(ucRecursionCountByLock[ulLockNum] != 0 ); - if( !--ucRecursionCountByLock[ulLockNum] ) - { - ucOwnedByCore[ulCoreNum] &= ~ulLockBit; - __mem_fence_release(); - *pxSpinLock = 1; - } - } + #define portRTOS_SPINLOCK_COUNT 2 + + /* Note this is a single method with uxAcquire parameter since we have + * static vars, the method is always called with a compile time constant for + * uxAcquire, and the compiler should dothe right thing! */ + static inline void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) { + static uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ]; + static uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ]; + configASSERT(ulLockNum >= 0 && ulLockNum < portRTOS_SPINLOCK_COUNT ); + uint32_t ulCoreNum = get_core_num(); + uint32_t ulLockBit = 1u << ulLockNum; + configASSERT(ulLockBit < 256u ); + if( uxAcquire ) + { + if( __builtin_expect( !*pxSpinLock, 0 ) ) + { + if( ucOwnedByCore[ulCoreNum] & ulLockBit ) + { + configASSERT(ucRecursionCountByLock[ulLockNum] != 255u ); + ucRecursionCountByLock[ulLockNum]++; + return; + } + while ( __builtin_expect( !*pxSpinLock, 0 ) ); + } + __mem_fence_acquire(); + configASSERT(ucRecursionCountByLock[ulLockNum] == 0 ); + ucRecursionCountByLock[ulLockNum] = 1; + ucOwnedByCore[ulCoreNum] |= ulLockBit; + } else { + configASSERT((ucOwnedByCore[ulCoreNum] & ulLockBit) != 0 ); + configASSERT(ucRecursionCountByLock[ulLockNum] != 0 ); + if( !--ucRecursionCountByLock[ulLockNum] ) + { + ucOwnedByCore[ulCoreNum] &= ~ulLockBit; + __mem_fence_release(); + *pxSpinLock = 1; + } + } } #if ( configNUM_CORES == 1 ) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index 22c18ecec58..c149d14c96f 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -115,7 +115,11 @@ static void prvTaskExitError( void ); /* Each task maintains its own interrupt status in the critical nesting * variable. 
This is initialized to 0 to allow vPortEnter/ExitCritical * to be called before the scheduler is started */ -static UBaseType_t uxCriticalNesting; +#if ( configNUM_CORES == 1 ) + static UBaseType_t uxCriticalNesting; +#else /* #if ( configNUM_CORES == 1 ) */ + UBaseType_t uxCriticalNestings[ configNUM_CORES ] = { 0 }; +#endif /* #if ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -463,24 +467,28 @@ void vPortYield( void ) /*-----------------------------------------------------------*/ -void vPortEnterCritical( void ) -{ - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; - __asm volatile ( "dsb" ::: "memory" ); - __asm volatile ( "isb" ); -} +#if ( configNUM_CORES == 1 ) + void vPortEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); + } +#endif /* #if ( configNUM_CORES == 1 ) */ /*-----------------------------------------------------------*/ -void vPortExitCritical( void ) -{ - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; - if( uxCriticalNesting == 0 ) +#if ( configNUM_CORES == 1 ) + void vPortExitCritical( void ) { - portENABLE_INTERRUPTS(); + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } } -} +#endif /* #if ( configNUM_CORES == 1 ) */ void vPortEnableInterrupts( void ) { diff --git a/tasks.c b/tasks.c index d2d9c6a6753..704120d5000 100644 --- a/tasks.c +++ b/tasks.c @@ -280,6 +280,13 @@ typedef BaseType_t TaskRunning_t; /* Indicates that the task is an Idle task. */ #define taskATTRIBUTE_IS_IDLE ( UBaseType_t ) ( 1UL << 0UL ) +#if ( ( configNUM_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) + #define portGET_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting ) + #define portSET_CRITICAL_NESTING_COUNT( x ) ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting = ( x ) ) + #define portINCREMENT_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting++ ) + #define portDECREMENT_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting-- ) +#endif /* #if ( ( configNUM_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */ + /* * Task control block. A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -688,7 +695,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; * the suspension and critical nesting counts, as well as release * and reacquire the correct locks. And then, do it all over again * if our state changed again during the reacquisition. 
*/ - uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; + uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT(); uxPrevSchedulerSuspended = uxSchedulerSuspended; /* This must only be called the first time we enter into a critical @@ -698,7 +705,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( uxPrevCriticalNesting > 0U ) { - pxThisTCB->uxCriticalNesting = 0U; + portSET_CRITICAL_NESTING_COUNT( 0U ); } else { @@ -722,7 +729,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; portDISABLE_INTERRUPTS(); portGET_TASK_LOCK(); portGET_ISR_LOCK(); - pxCurrentTCB->uxCriticalNesting = uxPrevCriticalNesting; + + portSET_CRITICAL_NESTING_COUNT( uxPrevCriticalNesting ); uxSchedulerSuspended = uxPrevSchedulerSuspended; if( uxPrevCriticalNesting == 0U ) @@ -776,7 +784,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xCoreID; /* This must be called from a critical section. */ - configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U ); #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) @@ -3059,12 +3067,12 @@ static BaseType_t prvCreateIdleTasks( void ) * address of the RAM then create the idle task. */ vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); xIdleTaskHandles[ 0 ] = xTaskCreateStatic( prvIdleTask, - configIDLE_TASK_NAME, - ulIdleTaskStackSize, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - pxIdleTaskStackBuffer, - pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + configIDLE_TASK_NAME, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ if( xIdleTaskHandles[ 0 ] != NULL ) { @@ -3082,12 +3090,12 @@ static BaseType_t prvCreateIdleTasks( void ) configIDLE_TASK_NAME, configMINIMAL_STACK_SIZE, ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ &xIdleTaskHandles[ 0 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } #endif /* configSUPPORT_STATIC_ALLOCATION */ } - #else + #else /* #if ( configNUM_CORES == 1 ) */ { BaseType_t xCoreID; char cIdleName[ configMAX_TASK_NAME_LEN ]; @@ -3210,7 +3218,7 @@ static BaseType_t prvCreateIdleTasks( void ) #endif /* configSUPPORT_STATIC_ALLOCATION */ } } - #endif + #endif /* #if ( configNUM_CORES == 1 ) */ return xReturn; } @@ -3367,7 +3375,7 @@ void vTaskSuspendAll( void ) if( uxSchedulerSuspended == 1U ) { - if( pxCurrentTCB->uxCriticalNesting == 0U ) + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { prvCheckForRunStateChange(); } @@ -4501,10 +4509,9 @@ BaseType_t xTaskIncrementTick( void ) portGET_ISR_LOCK(); { /* vTaskSwitchContext() must never be called from within a critical section. 
- * This is not necessarily true for single core FreeRTOS, but it is for this SMP port. */ - #if ( portCRITICAL_NESTING_IN_TCB == 1 ) - configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); - #endif + * This is not necessarily true for single core FreeRTOS, but it is for this + * SMP port. */ + configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 ); if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) { @@ -6069,7 +6076,7 @@ static void prvResetNextTaskUnblockTime( void ) */ void vTaskYieldWithinAPI( void ) { - if( pxCurrentTCB->uxCriticalNesting == 0U ) + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { portYIELD(); } @@ -6082,7 +6089,7 @@ static void prvResetNextTaskUnblockTime( void ) /*-----------------------------------------------------------*/ -#if ( portCRITICAL_NESTING_IN_TCB == 1 ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES == 1 ) ) void vTaskEnterCritical( void ) { @@ -6090,52 +6097,63 @@ static void prvResetNextTaskUnblockTime( void ) if( xSchedulerRunning != pdFALSE ) { - #if ( configNUM_CORES == 1 ) - { - ( pxCurrentTCB->uxCriticalNesting )++; + ( pxCurrentTCB->uxCriticalNesting )++; - /* This is not the interrupt safe version of the enter critical - * function so assert() if it is being called from an interrupt - * context. Only API functions that end in "FromISR" can be used in an - * interrupt. Only assert if the critical nesting count is 1 to - * protect against recursive calls if the assert function also uses a - * critical section. */ - if( pxCurrentTCB->uxCriticalNesting == 1 ) - { - portASSERT_IF_IN_ISR(); - } + /* This is not the interrupt safe version of the enter critical + * function so assert() if it is being called from an interrupt + * context. Only API functions that end in "FromISR" can be used in an + * interrupt. Only assert if the critical nesting count is 1 to + * protect against recursive calls if the assert function also uses a + * critical section. */ + if( pxCurrentTCB->uxCriticalNesting == 1 ) + { + portASSERT_IF_IN_ISR(); } - #else /* #if ( configNUM_CORES == 1 ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_CORES > 1 ) + + void vTaskEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + + if( xSchedulerRunning != pdFALSE ) + { + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { - if( pxCurrentTCB->uxCriticalNesting == 0U ) - { - portGET_TASK_LOCK(); - portGET_ISR_LOCK(); - } + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + } - ( pxCurrentTCB->uxCriticalNesting )++; + portINCREMENT_CRITICAL_NESTING_COUNT(); - /* This is not the interrupt safe version of the enter critical - * function so assert() if it is being called from an interrupt - * context. Only API functions that end in "FromISR" can be used in an - * interrupt. Only assert if the critical nesting count is 1 to - * protect against recursive calls if the assert function also uses a - * critical section. */ - if( pxCurrentTCB->uxCriticalNesting == 1 ) - { - portASSERT_IF_IN_ISR(); + /* This is not the interrupt safe version of the enter critical + * function so assert() if it is being called from an interrupt + * context. Only API functions that end in "FromISR" can be used in an + * interrupt. Only assert if the critical nesting count is 1 to + * protect against recursive calls if the assert function also uses a + * critical section. 
*/ + if( portGET_CRITICAL_NESTING_COUNT() == 1U ) + { + portASSERT_IF_IN_ISR(); - if( uxSchedulerSuspended == 0U ) - { - /* The only time there would be a problem is if this is called - * before a context switch and vTaskExitCritical() is called - * after pxCurrentTCB changes. Therefore this should not be - * used within vTaskSwitchContext(). */ - prvCheckForRunStateChange(); - } + if( uxSchedulerSuspended == 0U ) + { + /* The only time there would be a problem is if this is called + * before a context switch and vTaskExitCritical() is called + * after pxCurrentTCB changes. Therefore this should not be + * used within vTaskSwitchContext(). */ + prvCheckForRunStateChange(); } } - #endif /* #if ( configNUM_CORES == 1 ) */ } else { @@ -6143,11 +6161,11 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* portCRITICAL_NESTING_IN_TCB */ +#endif /* #if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) ) +#if ( configNUM_CORES > 1 ) UBaseType_t vTaskEnterCriticalFromISR( void ) { @@ -6157,12 +6175,12 @@ static void prvResetNextTaskUnblockTime( void ) { uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - if( pxCurrentTCB->uxCriticalNesting == 0U ) + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { portGET_ISR_LOCK(); } - ( pxCurrentTCB->uxCriticalNesting )++; + portINCREMENT_CRITICAL_NESTING_COUNT(); } else { @@ -6172,10 +6190,10 @@ static void prvResetNextTaskUnblockTime( void ) return uxSavedInterruptStatus; } -#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) ) */ +#endif /* #if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( portCRITICAL_NESTING_IN_TCB == 1 ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES == 1 ) ) void vTaskExitCritical( void ) { @@ -6195,31 +6213,64 @@ static void prvResetNextTaskUnblockTime( void ) if( pxCurrentTCB->uxCriticalNesting == 0U ) { - #if ( configNUM_CORES == 1 ) - { - portENABLE_INTERRUPTS(); - } - #else - { - BaseType_t xYieldCurrentTask; + portENABLE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } - /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; +#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES == 1 ) ) */ +/*-----------------------------------------------------------*/ - portRELEASE_ISR_LOCK(); - portRELEASE_TASK_LOCK(); - portENABLE_INTERRUPTS(); +#if ( configNUM_CORES > 1 ) - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. */ - if( xYieldCurrentTask != pdFALSE ) - { - portYIELD(); - } + void vTaskExitCritical( void ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* If critical nesting count is zero then this function + * does not match a previous call to vTaskEnterCritical(). */ + configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U ); + + /* This function should not be called in ISR. Use vTaskExitCriticalFromISR + * to exit critical section from ISR. 
*/ + portASSERT_IF_IN_ISR(); + + if( portGET_CRITICAL_NESTING_COUNT() > 0U ) + { + portDECREMENT_CRITICAL_NESTING_COUNT(); + + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) + { + BaseType_t xYieldCurrentTask; + + /* Get the xYieldPending stats inside the critical section. */ + xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; + + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + portENABLE_INTERRUPTS(); + + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. */ + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); } - #endif /* ( configNUM_CORES == 1 ) */ } else { @@ -6237,10 +6288,10 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* portCRITICAL_NESTING_IN_TCB */ +#endif /* #if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) ) +#if ( configNUM_CORES > 1 ) void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) { @@ -6248,15 +6299,15 @@ static void prvResetNextTaskUnblockTime( void ) if( xSchedulerRunning != pdFALSE ) { - /* If pxCurrentTCB->uxCriticalNesting is zero then this function + /* If critical nesting count is zero then this function * does not match a previous call to vTaskEnterCritical(). */ - configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U ); - if( pxCurrentTCB->uxCriticalNesting > 0U ) + if( portGET_CRITICAL_NESTING_COUNT() > 0U ) { - ( pxCurrentTCB->uxCriticalNesting )--; + portDECREMENT_CRITICAL_NESTING_COUNT(); - if( pxCurrentTCB->uxCriticalNesting == 0U ) + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { /* Get the xYieldPending stats inside the critical section. 
*/ xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; @@ -6289,7 +6340,7 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES > 1 ) ) */ +#endif /* #if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) From 89fa1816bd1f796260410dbdd60ea1eb01585c06 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 29 Nov 2022 18:03:36 +0800 Subject: [PATCH 132/164] Rename config num cores (#48) * Rename configNUM_CORES to configNUMBER_OF_CORES --- event_groups.c | 12 +- include/FreeRTOS.h | 42 +- include/task.h | 22 +- .../ThirdParty/GCC/RP2040/include/portmacro.h | 16 +- .../GCC/RP2040/include/rp2040_config.h | 2 +- portable/ThirdParty/GCC/RP2040/port.c | 28 +- queue.c | 30 +- tasks.c | 360 +++++++++--------- timers.c | 6 +- 9 files changed, 259 insertions(+), 259 deletions(-) diff --git a/event_groups.c b/event_groups.c index c9dd6839fae..ee4220fd93b 100644 --- a/event_groups.c +++ b/event_groups.c @@ -258,15 +258,15 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { vTaskYieldWithinAPI(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -418,15 +418,15 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { vTaskYieldWithinAPI(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 5a5d430ccce..251cb466fc7 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -151,7 +151,7 @@ #error Missing definition: configUSE_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. #endif -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) #ifndef configUSE_MINIMAL_IDLE_HOOK #error Missing definition: configUSE_MINIMAL_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. #endif @@ -348,8 +348,8 @@ #define portSOFTWARE_BARRIER() #endif -#ifndef configNUM_CORES - #define configNUM_CORES 1 +#ifndef configNUMBER_OF_CORES + #define configNUMBER_OF_CORES 1 #endif #ifndef configRUN_MULTIPLE_PRIORITIES @@ -358,27 +358,27 @@ #ifndef portGET_CORE_ID - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define portGET_CORE_ID() 0 #else - #error configNUM_CORES is set to more than 1 then portGET_CORE_ID must also be defined. - #endif /* configNUM_CORES */ + #error configNUMBER_OF_CORES is set to more than 1 then portGET_CORE_ID must also be defined. + #endif /* configNUMBER_OF_CORES */ #endif /* portGET_CORE_ID */ #ifndef portYIELD_CORE - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define portYIELD_CORE( x ) portYIELD() #else - #error configNUM_CORES is set to more than 1 then portYIELD_CORE must also be defined. 
- #endif /* configNUM_CORES */ + #error configNUMBER_OF_CORES is set to more than 1 then portYIELD_CORE must also be defined. + #endif /* configNUMBER_OF_CORES */ #endif /* portYIELD_CORE */ #ifndef portSET_INTERRUPT_MASK - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) #error portSET_INTERRUPT_MASK is required in SMP #endif @@ -386,7 +386,7 @@ #ifndef portCLEAR_INTERRUPT_MASK - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) #error portCLEAR_INTERRUPT_MASK is required in SMP #endif @@ -394,7 +394,7 @@ #ifndef portRELEASE_TASK_LOCK - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define portRELEASE_TASK_LOCK() #else #error portRELEASE_TASK_LOCK is required in SMP @@ -404,7 +404,7 @@ #ifndef portGET_TASK_LOCK - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define portGET_TASK_LOCK() #else #error portGET_TASK_LOCK is required in SMP @@ -414,7 +414,7 @@ #ifndef portRELEASE_ISR_LOCK - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define portRELEASE_ISR_LOCK() #else #error portRELEASE_ISR_LOCK is required in SMP @@ -424,7 +424,7 @@ #ifndef portGET_ISR_LOCK - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define portGET_ISR_LOCK() #else #error portGET_ISR_LOCK is required in SMP @@ -434,7 +434,7 @@ #ifndef portCHECK_IF_IN_ISR - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) #error portCHECK_IF_IN_ISR is required in SMP #endif @@ -442,7 +442,7 @@ #ifndef portENTER_CRITICAL_FROM_ISR - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) #error portENTER_CRITICAL_FROM_ISR is required in SMP #endif @@ -450,7 +450,7 @@ #ifndef portEXIT_CRITICAL_FROM_ISR - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) #error portEXIT_CRITICAL_FROM_ISR is required in SMP #endif @@ -1133,7 +1133,7 @@ #error configUSE_PREEMPTION must be set to 1 to use task preemption disable #endif -#if ( ( configNUM_CORES == 1 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) +#if ( ( configNUMBER_OF_CORES == 1 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) #error configUSE_TASK_PREEMPTION_DISABLE is not supported in single core FreeRTOS #endif @@ -1392,13 +1392,13 @@ typedef struct xSTATIC_TCB #if ( portUSING_MPU_WRAPPERS == 1 ) xMPU_SETTINGS xDummy2; #endif - #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) UBaseType_t uxDummy26; #endif StaticListItem_t xDummy3[ 2 ]; UBaseType_t uxDummy5; void * pxDummy6; - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) BaseType_t xDummy23; UBaseType_t uxDummy24; #endif diff --git a/include/task.h b/include/task.h index ddf0bd87505..458c46503d9 100644 --- a/include/task.h +++ b/include/task.h @@ -166,7 +166,7 @@ typedef struct xTASK_STATUS StackType_t * pxEndOfStack; /* Points to the end address of the task's stack area. */ #endif configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. 
*/ - #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) ) + #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) UBaseType_t uxCoreAffinityMask; /* The core affinity mask for the task */ #endif } TaskStatus_t; @@ -218,7 +218,7 @@ typedef enum * \ingroup SchedulerControl */ #define taskENTER_CRITICAL() portENTER_CRITICAL() -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() #else #define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR() @@ -237,7 +237,7 @@ typedef enum * \ingroup SchedulerControl */ #define taskEXIT_CRITICAL() portEXIT_CRITICAL() -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) #else #define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x ) @@ -271,7 +271,7 @@ typedef enum #define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) /* Checks if core ID is valid. */ -#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ) ) +#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUMBER_OF_CORES ) ) ) /*----------------------------------------------------------- * TASK CREATION API @@ -380,7 +380,7 @@ typedef enum TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; #endif -#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) +#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ const configSTACK_DEPTH_TYPE usStackDepth, @@ -508,7 +508,7 @@ typedef enum StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; #endif /* configSUPPORT_STATIC_ALLOCATION */ -#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) +#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
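The taskENTER_CRITICAL_FROM_ISR()/taskEXIT_CRITICAL_FROM_ISR() mapping above means interrupt handlers are written the same way on single-core and SMP builds; only the expansion differs. A minimal usage sketch, with a placeholder handler name:

    void vExampleISRHandler( void )    /* hypothetical interrupt handler */
    {
        UBaseType_t uxSavedInterruptStatus;

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            /* Access data shared with tasks, and with other cores, here. */
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
    }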
*/ const uint32_t ulStackDepth, @@ -597,7 +597,7 @@ typedef enum TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; #endif -#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, UBaseType_t uxCoreAffinityMask, TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; @@ -693,7 +693,7 @@ typedef enum TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; #endif -#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, UBaseType_t uxCoreAffinityMask, TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; @@ -1288,7 +1288,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; * Passing NULL will set the core affinity mask for the calling task. * * @param uxCoreAffinityMask A bitwise value that indicates the cores on - * which the task can run. Cores are numbered from 0 to configNUM_CORES - 1. + * which the task can run. Cores are numbered from 0 to configNUMBER_OF_CORES - 1. * For example, to ensure that a task can run on core 0 and core 1, set * uxCoreAffinityMask to 0x03. * @@ -1328,7 +1328,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; * * @return The core affinity mask which is a bitwise value that indicates * the cores on which a task can run. Cores are numbered from 0 to - * configNUM_CORES - 1. For example, if a task can run on core 0 and core 1, + * configNUMBER_OF_CORES - 1. For example, if a task can run on core 0 and core 1, * the core affinity mask is 0x03. * * Example usage: @@ -3222,7 +3222,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, * Sets the pointer to the current TCB to the TCB of the highest priority task * that is ready to run. */ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; #else portDONT_DISCARD void vTaskSwitchContext( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index 8f46a25d5bc..c6c2c15347a 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -110,20 +110,20 @@ /* Multi-core */ #define portMAX_CORE_COUNT 2 - #ifndef configNUM_CORES - #define configNUM_CORES 2 + #ifndef configNUMBER_OF_CORES + #define configNUMBER_OF_CORES 2 #endif /* Check validity of number of cores specified in config */ - #if ( configNUM_CORES < 1 || portMAX_CORE_COUNT < configNUM_CORES ) + #if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES ) #error "Invalid number of cores specified in config!" #endif - #if ( configTICK_CORE < 0 || configTICK_CORE > configNUM_CORES ) + #if ( configTICK_CORE < 0 || configTICK_CORE > configNUMBER_OF_CORES ) #error "Invalid tick core specified in config!" 
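The affinity documentation above maps bit n of the mask to core n. A short usage sketch, assuming configNUMBER_OF_CORES >= 2, configUSE_CORE_AFFINITY == 1 and dynamic allocation enabled; the task function and names are placeholders:

    extern void vExampleTask( void * pvParameters );    /* hypothetical task function */

    void vCreatePinnedTask( void )
    {
        TaskHandle_t xHandle = NULL;

        /* 0x03 sets bits 0 and 1, so the task may run on core 0 or core 1 only. */
        xTaskCreateAffinitySet( vExampleTask, "AffEx", configMINIMAL_STACK_SIZE,
                                NULL, tskIDLE_PRIORITY + 1U,
                                ( UBaseType_t ) 0x03, &xHandle );

        /* The mask can be read back, or relaxed again later. */
        configASSERT( vTaskCoreAffinityGet( xHandle ) == ( UBaseType_t ) 0x03 );
        vTaskCoreAffinitySet( xHandle, tskNO_AFFINITY );
    }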
#endif /* FreeRTOS core id is always zero based, so always 0 if we're running on only one core */ - #if configNUM_CORES == portMAX_CORE_COUNT + #if configNUMBER_OF_CORES == portMAX_CORE_COUNT #define portGET_CORE_ID() get_core_num() #else #define portGET_CORE_ID() 0 @@ -141,7 +141,7 @@ /*-----------------------------------------------------------*/ /* Critical nesting count management. */ - extern UBaseType_t uxCriticalNestings[ configNUM_CORES ]; + extern UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ]; #define portGET_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ] ) #define portSET_CRITICAL_NESTING_COUNT( x ) ( uxCriticalNestings[ portGET_CORE_ID() ] = ( x ) ) #define portINCREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]++ ) @@ -169,7 +169,7 @@ extern void vPortEnableInterrupts(); #define portENABLE_INTERRUPTS() vPortEnableInterrupts() - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) extern void vPortEnterCritical( void ); extern void vPortExitCritical( void ); #define portENTER_CRITICAL() vPortEnterCritical() @@ -225,7 +225,7 @@ } } - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define portGET_ISR_LOCK() #define portRELEASE_ISR_LOCK() #define portGET_TASK_LOCK() diff --git a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h index 195ea7cbd18..c383188a2a9 100644 --- a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h +++ b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h @@ -63,7 +63,7 @@ extern "C" { #endif #endif -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) /* configTICK_CORE indicates which core should handle the SysTick * interrupts */ #ifndef configTICK_CORE diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index c149d14c96f..75e1f4ef562 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -47,7 +47,7 @@ #endif /* LIB_PICO_MULTICORE */ /* TODO : consider to remove this macro. */ -#define portRUNNING_ON_BOTH_CORES ( configNUM_CORES == portMAX_CORE_COUNT ) +#define portRUNNING_ON_BOTH_CORES ( configNUMBER_OF_CORES == portMAX_CORE_COUNT ) /* Constants required to manipulate the NVIC. */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) @@ -115,11 +115,11 @@ static void prvTaskExitError( void ); /* Each task maintains its own interrupt status in the critical nesting * variable. 
This is initialized to 0 to allow vPortEnter/ExitCritical * to be called before the scheduler is started */ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) static UBaseType_t uxCriticalNesting; -#else /* #if ( configNUM_CORES == 1 ) */ - UBaseType_t uxCriticalNestings[ configNUM_CORES ] = { 0 }; -#endif /* #if ( configNUM_CORES == 1 ) */ +#else /* #if ( configNUMBER_OF_CORES == 1 ) */ + UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ] = { 0 }; +#endif /* #if ( configNUMBER_OF_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -137,8 +137,8 @@ static void prvTaskExitError( void ); static spin_lock_t * pxCrossCoreSpinLock; #endif - static spin_lock_t * pxYieldSpinLock[ configNUM_CORES ]; - static uint32_t ulYieldSpinLockSaveValue[ configNUM_CORES ]; + static spin_lock_t * pxYieldSpinLock[ configNUMBER_OF_CORES ]; + static uint32_t ulYieldSpinLockSaveValue[ configNUMBER_OF_CORES ]; #endif /* configSUPPORT_PICO_SYNC_INTEROP */ /* @@ -218,7 +218,7 @@ void vPortSVCHandler( void ) void vPortStartFirstTask( void ) { -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) __asm volatile ( " .syntax unified \n" " ldr r2, pxCurrentTCBConst1 \n"/* Obtain location of pxCurrentTCB. */ @@ -305,7 +305,7 @@ void vPortStartFirstTask( void ) } #endif -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) /* * See header file for description. */ @@ -467,7 +467,7 @@ void vPortYield( void ) /*-----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) void vPortEnterCritical( void ) { portDISABLE_INTERRUPTS(); @@ -475,10 +475,10 @@ void vPortYield( void ) __asm volatile ( "dsb" ::: "memory" ); __asm volatile ( "isb" ); } -#endif /* #if ( configNUM_CORES == 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES == 1 ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) void vPortExitCritical( void ) { configASSERT( uxCriticalNesting ); @@ -488,7 +488,7 @@ void vPortYield( void ) portENABLE_INTERRUPTS(); } } -#endif /* #if ( configNUM_CORES == 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES == 1 ) */ void vPortEnableInterrupts( void ) { @@ -543,7 +543,7 @@ void vYieldCore( int xCoreID ) void xPortPendSVHandler( void ) { /* This is a naked function. */ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) __asm volatile ( " .syntax unified \n" diff --git a/queue.c b/queue.c index 16649527041..c85ad3a3691 100644 --- a/queue.c +++ b/queue.c @@ -89,11 +89,11 @@ typedef struct SemaphoreData * performed just because a higher priority task has been woken. */ #define queueYIELD_IF_USING_PREEMPTION() #else - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ #define queueYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ #endif /* @@ -1025,15 +1025,15 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * is also a higher priority task in the pending ready list. 
*/ if( xTaskResumeAll() == pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { vTaskYieldWithinAPI(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } else @@ -1494,15 +1494,15 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { vTaskYieldWithinAPI(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -1685,15 +1685,15 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { vTaskYieldWithinAPI(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -1871,15 +1871,15 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { vTaskYieldWithinAPI(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { diff --git a/tasks.c b/tasks.c index 704120d5000..4418e15bf20 100644 --- a/tasks.c +++ b/tasks.c @@ -64,7 +64,7 @@ * performed just because a higher priority task has been woken. */ #define taskYIELD_IF_USING_PREEMPTION() #else - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() #else #define taskYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() @@ -164,7 +164,7 @@ #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) #error configUSE_PORT_OPTIMISED_TASK_SELECTION not supported in FreeRTOS SMP. #endif @@ -269,23 +269,23 @@ typedef BaseType_t TaskRunning_t; * taskTASK_IS_YIELDING - Returns pdTRUE if the task is actively running * but scheduled to yield. */ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) #define taskTASK_IS_RUNNING( pxTCB ) ( pxTCB == pxCurrentTCB ) #define taskTASK_IS_YIELDING( pxTCB ) ( pdFALSE ) #else - #define taskTASK_IS_RUNNING( pxTCB ) ( ( pxTCB->xTaskRunState >= 0 ) && ( pxTCB->xTaskRunState < configNUM_CORES ) ) + #define taskTASK_IS_RUNNING( pxTCB ) ( ( pxTCB->xTaskRunState >= 0 ) && ( pxTCB->xTaskRunState < configNUMBER_OF_CORES ) ) #define taskTASK_IS_YIELDING( pxTCB ) ( pxTCB->xTaskRunState == taskTASK_YIELDING ) #endif /* Indicates that the task is an Idle task. 
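The same pattern repeats in every blocking queue API above: the scheduler is suspended while the queue is examined, and if resuming the scheduler did not already cause a switch the call yields itself, using the single-core or SMP form as appropriate. Reduced to a skeleton (illustrative only, with the queue locking and bookkeeping omitted):

    static void prvExampleBlockingCall( void )    /* hypothetical, for illustration */
    {
        vTaskSuspendAll();
        {
            /* ... examine the object and place the calling task on an event list ... */
        }

        if( xTaskResumeAll() == pdFALSE )
        {
            #if ( configNUMBER_OF_CORES == 1 )
                portYIELD_WITHIN_API();
            #else
                vTaskYieldWithinAPI();    /* Defers the yield if the caller holds a critical section. */
            #endif
        }
    }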
*/ #define taskATTRIBUTE_IS_IDLE ( UBaseType_t ) ( 1UL << 0UL ) -#if ( ( configNUM_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) +#if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) #define portGET_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting ) #define portSET_CRITICAL_NESTING_COUNT( x ) ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting = ( x ) ) #define portINCREMENT_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting++ ) #define portDECREMENT_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting-- ) -#endif /* #if ( ( configNUM_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */ +#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */ /* * Task control block. A task control block (TCB) is allocated for each task, @@ -300,7 +300,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ #endif - #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have greater than or equal to the number of bits as confNUM_CORES. */ #endif @@ -308,7 +308,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ StackType_t * pxStack; /*< Points to the start of the stack. */ - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if the task is running. Otherwise, identifies the task's state - not running or yielding. */ UBaseType_t uxTaskAttributes; /*< Task's attributes - currently used to identify the idle tasks. */ #endif @@ -378,10 +378,10 @@ typedef tskTCB TCB_t; /*lint -save -e956 A manual analysis and inspection has been used to determine * which static variables must be declared volatile. */ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; #else -portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; +portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ] = { NULL }; #define pxCurrentTCB xTaskGetCurrentTaskHandle() #endif @@ -421,11 +421,11 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINI PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; -PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUM_CORES ] = { pdFALSE }; +PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE }; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; -PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. 
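The run-state macros above encode a task's state in xTaskRunState: a value from 0 to configNUMBER_OF_CORES - 1 is the core currently running the task, and anything outside that range means not running or yielding. A small illustrative helper (it could only live inside tasks.c, since TCB_t is private to that file):

    static BaseType_t prvTaskIsRunningOnCurrentCore( const TCB_t * pxTCB )
    {
        /* Running somewhere, and that somewhere is the calling core. */
        return ( BaseType_t ) ( taskTASK_IS_RUNNING( pxTCB ) &&
                                ( pxTCB->xTaskRunState == ( TaskRunning_t ) portGET_CORE_ID() ) );
    }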
*/ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUM_CORES ] = { NULL }; /*< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. */ +PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ] = { NULL }; /*< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -466,24 +466,24 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t */ static BaseType_t prvCreateIdleTasks( void ); -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) /* * Checks to see if another task moved the current task out of the ready * list while it was waiting to enter a critical section and yields, if so. */ static void prvCheckForRunStateChange( void ); -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) /* * Yields the given core. */ static void prvYieldCore( BaseType_t xCoreID ); -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) /* * Yields a core, or cores if multiple priorities are not allowed to run @@ -491,15 +491,15 @@ static BaseType_t prvCreateIdleTasks( void ); */ static void prvYieldForTask( TCB_t * pxTCB, const BaseType_t xPreemptEqualPriority ); -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) /* * Selects the highest priority available task for the given core. */ static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ); -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /** * Utility task that simply returns pdTRUE if the task referenced by xTask is @@ -523,7 +523,7 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * The idle task is automatically created and added to the ready lists upon * creation of the first user task. * - * In the FreeRTOS SMP, configNUM_CORES - 1 minimal idle tasks are also + * In the FreeRTOS SMP, configNUMBER_OF_CORES - 1 minimal idle tasks are also * created to ensure that each core has an idle task to run when no other * task is available to run. 
* @@ -535,7 +535,7 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * */ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; #endif @@ -668,7 +668,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) static void prvCheckForRunStateChange( void ) { UBaseType_t uxPrevCriticalNesting; @@ -742,11 +742,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) static void prvYieldCore( BaseType_t xCoreID ) { /* This must be called from a critical section and xCoreID must be valid. */ @@ -770,10 +770,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) static void prvYieldForTask( TCB_t * pxTCB, const BaseType_t xPreemptEqualPriority ) { @@ -802,7 +802,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; --xLowestPriorityToPreempt; } - for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) { xCurrentCoreTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority; @@ -873,10 +873,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif } } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) { UBaseType_t uxCurrentPriority = uxTopReadyPriority; @@ -1016,7 +1016,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; * running, make sure all the other idle tasks yield. */ BaseType_t x; - for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ) + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ ) { if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) { @@ -1057,9 +1057,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /* The ready task that was removed from this core is excluded from it. 
*/ } - uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); + uxCoreMap &= ( ( 1 << configNUMBER_OF_CORES ) - 1 ); - for( x = ( configNUM_CORES - 1 ); x >= 0; x-- ) + for( x = ( configNUMBER_OF_CORES - 1 ); x >= 0; x-- ) { UBaseType_t uxCore = ( UBaseType_t ) x; BaseType_t xTaskPriority; @@ -1102,7 +1102,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; return xTaskScheduled; } -#endif /* ( configNUM_CORES > 1 ) */ +#endif /* ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ @@ -1115,7 +1115,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; UBaseType_t uxPriority, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer ) - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { return xTaskCreateStaticAffinitySet( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY ); } @@ -1128,7 +1128,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer, UBaseType_t uxCoreAffinityMask ) - #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ { TCB_t * pxNewTCB; TaskHandle_t xReturn; @@ -1165,7 +1165,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { /* Set the task's affinity before scheduling it. */ pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; @@ -1189,7 +1189,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t * pxCreatedTask ) - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { return xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); } @@ -1197,7 +1197,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, UBaseType_t uxCoreAffinityMask, TaskHandle_t * pxCreatedTask ) - #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ { TCB_t * pxNewTCB; BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; @@ -1232,7 +1232,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; pxCreatedTask, pxNewTCB, pxTaskDefinition->xRegions ); - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { /* Set the task's affinity before scheduling it. 
*/ pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; @@ -1253,7 +1253,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t * pxCreatedTask ) - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { return xTaskCreateRestrictedAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); } @@ -1261,7 +1261,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, UBaseType_t uxCoreAffinityMask, TaskHandle_t * pxCreatedTask ) - #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ { TCB_t * pxNewTCB; BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; @@ -1299,7 +1299,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; pxCreatedTask, pxNewTCB, pxTaskDefinition->xRegions ); - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { /* Set the task's affinity before scheduling it. */ pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; @@ -1325,7 +1325,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask ) - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { return xTaskCreateAffinitySet( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, tskNO_AFFINITY, pxCreatedTask ); } @@ -1337,7 +1337,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; UBaseType_t uxPriority, UBaseType_t uxCoreAffinityMask, TaskHandle_t * const pxCreatedTask ) - #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ { TCB_t * pxNewTCB; BaseType_t xReturn; @@ -1414,7 +1414,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { /* Set the task's affinity before scheduling it. */ pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; @@ -1580,7 +1580,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif - #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) { pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; } @@ -1645,7 +1645,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #endif /* portUSING_MPU_WRAPPERS */ /* Initialize task state and task attributes. 
*/ - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) { pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; @@ -1655,7 +1655,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, pxNewTCB->uxTaskAttributes |= taskATTRIBUTE_IS_IDLE; } } - #endif /* #if ( configNUM_CORES > 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ if( pxCreatedTask != NULL ) { @@ -1670,7 +1670,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } /*-----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { @@ -1755,7 +1755,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } -#else /* #if ( configNUM_CORES == 1 ) */ +#else /* #if ( configNUMBER_OF_CORES == 1 ) */ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { @@ -1784,7 +1784,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, BaseType_t xCoreID; /* Check if a core is free. */ - for( xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ ) + for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ ) { if( pxCurrentTCBs[ xCoreID ] == NULL ) { @@ -1835,7 +1835,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, taskEXIT_CRITICAL(); } -#endif /* #if ( configNUM_CORES == 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES == 1 ) */ /*-----------------------------------------------------------*/ #if ( INCLUDE_vTaskDelete == 1 ) @@ -1879,7 +1879,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* If the task is running (or yielding), we must add it to the * termination list so that an idle task can delete it when it is * no longer running. */ - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) if( pxTCB == pxCurrentTCB ) #else if( pxTCB->xTaskRunState != taskTASK_NOT_RUNNING ) @@ -1906,7 +1906,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * after which it is not possible to yield away from this task - * hence xYieldPending is used to latch that a context switch is * required. */ - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ 0 ] ); #else portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ pxTCB->xTaskRunState ] ); @@ -1923,7 +1923,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { taskEXIT_CRITICAL(); @@ -1950,7 +1950,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { /* If a running task is not deleting itself, call prvDeleteTCB. If a running * task deletes itself, prvDeleteTCB is called from prvCheckTasksWaitingTermination @@ -1976,7 +1976,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, taskEXIT_CRITICAL(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } #endif /* INCLUDE_vTaskDelete */ @@ -2057,7 +2057,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * have put ourselves to sleep. */ if( xAlreadyYielded == pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) portYIELD_WITHIN_API(); #else vTaskYieldWithinAPI(); @@ -2109,7 +2109,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * have put ourselves to sleep. 
*/ if( xAlreadyYielded == pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) portYIELD_WITHIN_API(); #else vTaskYieldWithinAPI(); @@ -2136,7 +2136,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, configASSERT( pxTCB ); - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) if( pxTCB == pxCurrentTCB ) { /* The task calling this function is querying its own state. */ @@ -2213,13 +2213,13 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */ { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* If the task is not in any other state, it must be in the * Ready (including pending ready) state. */ eReturn = eReady; } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { if( taskTASK_IS_RUNNING( pxTCB ) ) { @@ -2233,7 +2233,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, eReturn = eReady; } } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } @@ -2314,7 +2314,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; BaseType_t xYieldRequired = pdFALSE; - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) BaseType_t xYieldForTask = pdFALSE; #endif @@ -2354,7 +2354,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * priority than a running task. */ if( uxNewPriority > uxCurrentBasePriority ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { if( pxTCB != pxCurrentTCB ) { @@ -2377,13 +2377,13 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * priority task able to run so no yield is required. */ } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { /* The priority of a task is being raised so * perform a yield for this task later. */ xYieldForTask = pdTRUE; } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else if( taskTASK_IS_RUNNING( pxTCB ) ) { @@ -2467,7 +2467,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } else { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { mtCOVERAGE_TEST_MARKER(); } @@ -2481,7 +2481,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #endif } - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { if( xYieldRequired != pdFALSE ) { @@ -2492,7 +2492,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { #if ( configUSE_PREEMPTION == 1 ) { @@ -2511,7 +2511,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ /* Remove compiler warning about unused variables when the port * optimised task selection is not being used. 
*/ @@ -2524,7 +2524,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #endif /* INCLUDE_vTaskPrioritySet */ /*-----------------------------------------------------------*/ -#if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) +#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) void vTaskCoreAffinitySet( const TaskHandle_t xTask, UBaseType_t uxCoreAffinityMask ) { @@ -2562,7 +2562,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Calculate the cores on which this task was not allowed to * run previously. */ - uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1 << configNUM_CORES ) - 1 ); + uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1 << configNUMBER_OF_CORES ) - 1 ); /* Does the new core mask enables this task to run on any of the * previously not allowed cores? If yes, check if this task can be @@ -2582,10 +2582,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } taskEXIT_CRITICAL(); } -#endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ +#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) +#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) { TCB_t * pxTCB; @@ -2600,7 +2600,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, return uxCoreAffinityMask; } -#endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ +#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ /*-----------------------------------------------------------*/ @@ -2656,7 +2656,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { TCB_t * pxTCB; - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) TaskRunning_t xTaskRunningOnCore; #endif @@ -2668,7 +2668,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceTASK_SUSPEND( pxTCB ); - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) xTaskRunningOnCore = pxTCB->xTaskRunState; #endif @@ -2712,7 +2712,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ } - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { taskEXIT_CRITICAL(); @@ -2763,7 +2763,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { if( xSchedulerRunning != pdFALSE ) { @@ -2829,7 +2829,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, taskEXIT_CRITICAL(); } } /* taskEXIT_CRITICAL() - already exited in one of three cases above. */ - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } #endif /* INCLUDE_vTaskSuspend */ @@ -2890,7 +2890,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* It does not make sense to resume the calling task. */ configASSERT( xTaskToResume ); - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. 
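To make the mask arithmetic in vTaskCoreAffinitySet() above concrete, here is the calculation worked through under an assumed four-core build:

    void vAffinityMaskExample( void )    /* hypothetical, illustration only */
    {
        /* Assume configNUMBER_OF_CORES == 4 and the task was previously pinned
         * to cores 0 and 1. */
        UBaseType_t uxPrevMask   = 0x03U;
        UBaseType_t uxNotAllowed = ( ~uxPrevMask ) & ( ( 1U << 4 ) - 1U );

        /* uxNotAllowed == 0x0C: cores 2 and 3 were previously not allowed.  If
         * the new mask sets bit 2 or bit 3 and the task is on a ready list, the
         * kernel checks whether the task should now be scheduled on one of them. */
        ( void ) uxNotAllowed;
    }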
*/ @@ -2916,7 +2916,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* A higher priority task may have just been resumed. */ if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) @@ -2931,7 +2931,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { #if ( configUSE_PREEMPTION == 1 ) { @@ -2939,7 +2939,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -2995,7 +2995,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* Check the ready lists can be accessed. */ if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* Ready lists can be accessed so move the task from the * suspended list to the ready list directly. */ @@ -3013,7 +3013,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); @@ -3026,7 +3026,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); } - #if ( ( configNUM_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) { prvYieldForTask( pxTCB, pdTRUE ); @@ -3035,7 +3035,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, xYieldRequired = pdTRUE; } } - #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */ + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */ } else { @@ -3054,7 +3054,7 @@ static BaseType_t prvCreateIdleTasks( void ) { BaseType_t xReturn = pdPASS; - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* Add the idle task at the lowest priority. */ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) @@ -3095,13 +3095,13 @@ static BaseType_t prvCreateIdleTasks( void ) } #endif /* configSUPPORT_STATIC_ALLOCATION */ } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { BaseType_t xCoreID; char cIdleName[ configMAX_TASK_NAME_LEN ]; /* Add each idle task at the lowest priority. 
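For orientation, the idle-creation loop that follows creates one idle task per core. Its shape, heavily simplified (name generation, static versus dynamic allocation and error handling are elided, and which core receives the full idle task is shown here only as an assumption):

    /* Simplified shape only -- not the literal code. */
    BaseType_t xCoreID;

    for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ )
    {
        if( xCoreID == 0 )
        {
            /* One full prvIdleTask: also cleans up deleted tasks, tickless idle, etc. */
        }
        else
        {
            /* configNUMBER_OF_CORES - 1 prvMinimalIdleTask instances: these only
             * yield and run the minimal idle hook. */
        }
    }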
*/ - for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) { BaseType_t x; @@ -3172,8 +3172,8 @@ static BaseType_t prvCreateIdleTasks( void ) } else { - static StaticTask_t xIdleTCBBuffers[ configNUM_CORES - 1 ]; - static StackType_t xIdleTaskStackBuffers[ configNUM_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; + static StaticTask_t xIdleTCBBuffers[ configNUMBER_OF_CORES - 1 ]; + static StackType_t xIdleTaskStackBuffers[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, cIdleName, @@ -3218,7 +3218,7 @@ static BaseType_t prvCreateIdleTasks( void ) #endif /* configSUPPORT_STATIC_ALLOCATION */ } } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ return xReturn; } @@ -3326,7 +3326,7 @@ void vTaskEndScheduler( void ) void vTaskSuspendAll( void ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* A critical section is not required as the variable is of type * BaseType_t. Please read Richard Barry's reply in the following link to a @@ -3345,7 +3345,7 @@ void vTaskSuspendAll( void ) * the above increment elsewhere. */ portMEMORY_BARRIER(); } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { UBaseType_t ulState; @@ -3396,7 +3396,7 @@ void vTaskSuspendAll( void ) mtCOVERAGE_TEST_MARKER(); } } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } /*----------------------------------------------------------*/ @@ -3469,7 +3469,7 @@ BaseType_t xTaskResumeAll( void ) TCB_t * pxTCB = NULL; BaseType_t xAlreadyYielded = pdFALSE; - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) if( xSchedulerRunning != pdFALSE ) #endif { @@ -3504,7 +3504,7 @@ BaseType_t xTaskResumeAll( void ) listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* If the moved task has a priority higher than the current * task then a yield must be performed. */ @@ -3517,13 +3517,13 @@ BaseType_t xTaskResumeAll( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. * If the current core yielded then vTaskSwitchContext() has already been called * which sets xYieldPendings for the current core to pdTRUE. */ } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } if( pxTCB != NULL ) @@ -3583,11 +3583,11 @@ BaseType_t xTaskResumeAll( void ) } #endif /* #if ( configUSE_PREEMPTION != 0 ) */ - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { taskYIELD_IF_USING_PREEMPTION(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -4014,7 +4014,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * switch if preemption is turned off. 
*/ #if ( configUSE_PREEMPTION == 1 ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* Preemption is on, but a context switch should only be * performed if the unblocked task has a priority that is @@ -4030,7 +4030,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { taskENTER_CRITICAL(); { @@ -4038,7 +4038,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) } taskEXIT_CRITICAL(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } @@ -4061,12 +4061,12 @@ BaseType_t xTaskIncrementTick( void ) TickType_t xItemValue; BaseType_t xSwitchRequired = pdFALSE; - #if ( configUSE_PREEMPTION == 1 ) && ( configNUM_CORES > 1 ) + #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) UBaseType_t x; - BaseType_t xYieldRequiredForCore[ configNUM_CORES ] = { pdFALSE }; - #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUM_CORES > 1 ) */ + BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE }; + #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */ - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) taskENTER_CRITICAL(); #endif { @@ -4162,7 +4162,7 @@ BaseType_t xTaskIncrementTick( void ) * context switch if preemption is turned off. */ #if ( configUSE_PREEMPTION == 1 ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* Preemption is on, but a context switch should * only be performed if the unblocked task's @@ -4181,11 +4181,11 @@ BaseType_t xTaskIncrementTick( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* #if( configNUM_CORES == 1 ) */ + #else /* #if( configNUMBER_OF_CORES == 1 ) */ { prvYieldForTask( pxTCB, pdTRUE ); } - #endif /* #if( configNUM_CORES == 1 ) */ + #endif /* #if( configNUMBER_OF_CORES == 1 ) */ } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } @@ -4197,7 +4197,7 @@ BaseType_t xTaskIncrementTick( void ) * writer has not explicitly turned time slicing off. */ #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { @@ -4208,9 +4208,9 @@ BaseType_t xTaskIncrementTick( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUM_CORES ); x++ ) + for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUMBER_OF_CORES ); x++ ) { if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { @@ -4222,7 +4222,7 @@ BaseType_t xTaskIncrementTick( void ) } } } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ @@ -4243,7 +4243,7 @@ BaseType_t xTaskIncrementTick( void ) #if ( configUSE_PREEMPTION == 1 ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { /* For single core the core ID is always 0. 
*/ if( xYieldPendings[ 0 ] != pdFALSE ) @@ -4255,12 +4255,12 @@ BaseType_t xTaskIncrementTick( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { BaseType_t xCoreID; xCoreID = portGET_CORE_ID(); - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUMBER_OF_CORES; x++ ) { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) @@ -4284,7 +4284,7 @@ BaseType_t xTaskIncrementTick( void ) } } } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } @@ -4301,7 +4301,7 @@ BaseType_t xTaskIncrementTick( void ) #endif } } - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) taskEXIT_CRITICAL(); #endif @@ -4421,7 +4421,7 @@ BaseType_t xTaskIncrementTick( void ) #endif /* configUSE_APPLICATION_TASK_TAG */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES == 1 ) +#if ( configNUMBER_OF_CORES == 1 ) void vTaskSwitchContext( void ) { if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) @@ -4494,7 +4494,7 @@ BaseType_t xTaskIncrementTick( void ) #endif } } -#else /* if ( configNUM_CORES == 1 ) */ +#else /* if ( configNUMBER_OF_CORES == 1 ) */ void vTaskSwitchContext( BaseType_t xCoreID ) { /* Acquire both locks: @@ -4585,7 +4585,7 @@ BaseType_t xTaskIncrementTick( void ) portRELEASE_ISR_LOCK(); portRELEASE_TASK_LOCK(); } -#endif /* if ( configNUM_CORES > 1 ) */ +#endif /* if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ void vTaskPlaceOnEventList( List_t * const pxEventList, @@ -4722,7 +4722,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); } - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) { @@ -4740,7 +4740,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) xReturn = pdFALSE; } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { xReturn = pdFALSE; @@ -4755,7 +4755,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ return xReturn; } @@ -4799,7 +4799,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); prvAddTaskToReadyList( pxUnblockedTCB ); - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) { @@ -4810,7 +4810,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, xYieldPendings[ 0 ] = pdTRUE; } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { #if ( configUSE_PREEMPTION == 1 ) { @@ -4822,7 +4822,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, } #endif } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } /*-----------------------------------------------------------*/ @@ -4973,7 +4973,7 @@ void vTaskMissedYield( void ) * void prvMinimalIdleTask( void *pvParameters ); */ -#if ( configNUM_CORES > 1 ) +#if ( 
configNUMBER_OF_CORES > 1 ) static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) { ( void ) pvParameters; @@ -5004,7 +5004,7 @@ void vTaskMissedYield( void ) * the ready list at the idle priority contains one more task than the * number of idle tasks, which is equal to the configured numbers of cores * then a task other than the idle task is ready to execute. */ - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES ) { taskYIELD(); } @@ -5032,7 +5032,7 @@ void vTaskMissedYield( void ) #endif /* configUSE_MINIMAL_IDLE_HOOK */ } } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /* * ----------------------------------------------------------- @@ -5058,13 +5058,13 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) * any. */ portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ); - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) { /* SMP all cores start up in the idle task. This initial yield gets the application * tasks started. */ taskYIELD(); } - #endif /* #if ( configNUM_CORES > 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ for( ; ; ) { @@ -5094,7 +5094,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) * the ready list at the idle priority contains one more task than the * number of idle tasks, which is equal to the configured numbers of cores * then a task other than the idle task is ready to execute. */ - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES ) { taskYIELD(); } @@ -5168,7 +5168,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) } #endif /* configUSE_TICKLESS_IDLE */ - #if ( ( configNUM_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) { extern void vApplicationMinimalIdleHook( void ); @@ -5182,7 +5182,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) * CALL A FUNCTION THAT MIGHT BLOCK. */ vApplicationMinimalIdleHook(); } - #endif /* #if ( ( configNUM_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) */ + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) */ } } /*-----------------------------------------------------------*/ @@ -5344,7 +5344,7 @@ static void prvCheckTasksWaitingTermination( void ) * being called too often in the idle task. 
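Both idle loops above call vApplicationMinimalIdleHook() when configUSE_MINIMAL_IDLE_HOOK is 1, so an SMP application that enables that option has to supply the function. A placeholder implementation, subject to the restriction spelled out in the code above:

    void vApplicationMinimalIdleHook( void )
    {
        /* Called on every pass through each idle task's loop.  It must return to
         * its caller and must never call a function that might block. */
    }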
*/ while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) { - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { taskENTER_CRITICAL(); { @@ -5359,7 +5359,7 @@ static void prvCheckTasksWaitingTermination( void ) prvDeleteTCB( pxTCB ); } - #else /* #if( configNUM_CORES == 1 ) */ + #else /* #if( configNUMBER_OF_CORES == 1 ) */ { pxTCB = NULL; @@ -5395,7 +5395,7 @@ static void prvCheckTasksWaitingTermination( void ) prvDeleteTCB( pxTCB ); } } - #endif /* #if( configNUM_CORES == 1 ) */ + #endif /* #if( configNUMBER_OF_CORES == 1 ) */ } } #endif /* INCLUDE_vTaskDelete */ @@ -5424,7 +5424,7 @@ static void prvCheckTasksWaitingTermination( void ) #endif pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; - #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUM_CORES > 1 ) ) + #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) { pxTaskStatus->uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; } @@ -5711,9 +5711,9 @@ static void prvResetNextTaskUnblockTime( void ) } /*-----------------------------------------------------------*/ -#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUM_CORES > 1 ) +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 ) - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) TaskHandle_t xTaskGetCurrentTaskHandle( void ) { TaskHandle_t xReturn; @@ -5725,7 +5725,7 @@ static void prvResetNextTaskUnblockTime( void ) return xReturn; } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ TaskHandle_t xTaskGetCurrentTaskHandle( void ) { TaskHandle_t xReturn; @@ -5751,7 +5751,7 @@ static void prvResetNextTaskUnblockTime( void ) return xReturn; } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ /*-----------------------------------------------------------*/ @@ -5768,7 +5768,7 @@ static void prvResetNextTaskUnblockTime( void ) } else { - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) taskENTER_CRITICAL(); #endif { @@ -5781,7 +5781,7 @@ static void prvResetNextTaskUnblockTime( void ) xReturn = taskSCHEDULER_SUSPENDED; } } - #if ( configNUM_CORES > 1 ) + #if ( configNUMBER_OF_CORES > 1 ) taskEXIT_CRITICAL(); #endif } @@ -6068,7 +6068,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* configUSE_MUTEXES */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) /* If not in a critical section then yield immediately. 
* Otherwise set xYieldPendings to true to wait to @@ -6085,11 +6085,11 @@ static void prvResetNextTaskUnblockTime( void ) xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; } } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES == 1 ) ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) void vTaskEnterCritical( void ) { @@ -6116,10 +6116,10 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES == 1 ) ) */ +#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskEnterCritical( void ) { @@ -6161,11 +6161,11 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) UBaseType_t vTaskEnterCriticalFromISR( void ) { @@ -6190,10 +6190,10 @@ static void prvResetNextTaskUnblockTime( void ) return uxSavedInterruptStatus; } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES == 1 ) ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) void vTaskExitCritical( void ) { @@ -6231,10 +6231,10 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUM_CORES == 1 ) ) */ +#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskExitCritical( void ) { @@ -6288,10 +6288,10 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( configNUM_CORES > 1 ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) { @@ -6340,7 +6340,7 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* #if ( configNUM_CORES > 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) @@ -6655,7 +6655,7 @@ TickType_t uxTaskResetEventItemValue( void ) * section (some will yield immediately, others wait until the * critical section exits) - but it is not something that * application code should ever do. */ - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } @@ -6742,7 +6742,7 @@ TickType_t uxTaskResetEventItemValue( void ) * section (some will yield immediately, others wait until the * critical section exits) - but it is not something that * application code should ever do. 
*/ - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } @@ -6901,7 +6901,7 @@ TickType_t uxTaskResetEventItemValue( void ) } #endif - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { @@ -6914,7 +6914,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { #if ( configUSE_PREEMPTION == 1 ) { @@ -6922,7 +6922,7 @@ TickType_t uxTaskResetEventItemValue( void ) } #endif } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -7048,7 +7048,7 @@ TickType_t uxTaskResetEventItemValue( void ) listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); } - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { @@ -7069,7 +7069,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { #if ( configUSE_PREEMPTION == 1 ) { @@ -7085,7 +7085,7 @@ TickType_t uxTaskResetEventItemValue( void ) } #endif /* if ( configUSE_PREEMPTION == 1 ) */ } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); @@ -7159,7 +7159,7 @@ TickType_t uxTaskResetEventItemValue( void ) listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); } - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { @@ -7180,7 +7180,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { #if ( configUSE_PREEMPTION == 1 ) { @@ -7196,7 +7196,7 @@ TickType_t uxTaskResetEventItemValue( void ) } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); @@ -7273,7 +7273,7 @@ TickType_t uxTaskResetEventItemValue( void ) { configRUN_TIME_COUNTER_TYPE ulReturn = 0; - for( BaseType_t i = 0; i < configNUM_CORES; i++ ) + for( BaseType_t i = 0; i < configNUMBER_OF_CORES; i++ ) { ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter; } @@ -7291,7 +7291,7 @@ TickType_t uxTaskResetEventItemValue( void ) configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0; - ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUM_CORES; + ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES; /* For percentage calculations. */ ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; @@ -7299,7 +7299,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* Avoid divide by zero errors. */ if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 ) { - for( BaseType_t i = 0; i < configNUM_CORES; i++ ) + for( BaseType_t i = 0; i < configNUMBER_OF_CORES; i++ ) { ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter; } diff --git a/timers.c b/timers.c index ceff90931a3..6fe14d1afdd 100644 --- a/timers.c +++ b/timers.c @@ -688,15 +688,15 @@ * block time to expire. If a command arrived between the * critical section being exited and this yield then the yield * will not cause the task to block. 
*/ - #if ( configNUM_CORES == 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { portYIELD_WITHIN_API(); } - #else /* #if ( configNUM_CORES == 1 ) */ + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { vTaskYieldWithinAPI(); } - #endif /* #if ( configNUM_CORES == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { From ef88897c099b736ab50f9798eae098a982a8c3dc Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 6 Jan 2023 17:27:41 +0800 Subject: [PATCH 133/164] Fix the task selection when task yields (#54) --- tasks.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/tasks.c b/tasks.c index ae37b89d275..96ddf0bc954 100644 --- a/tasks.c +++ b/tasks.c @@ -890,6 +890,24 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xPriorityDropped = pdFALSE; #endif + /* A new task is created and a running task with the same priority yields + * itself to run the new task. When a running task yields itself, it is still + * in the ready list. This running task will be selected before the new task + * since the new task is always added to the end of the ready list. + * The other problem is that the running task still in the same position of + * the ready list when it yields itself. It is possible that it will be selected + * earlier then other tasks which waits longer than this task. + * + * To fix these problems, the running task should be put to the end of the + * ready list before searching for the ready task in the ready list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ), + &pxCurrentTCBs[ xCoreID ]->xStateListItem ) == pdTRUE ) + { + uxListRemove( &pxCurrentTCBs[ xCoreID ]->xStateListItem ); + vListInsertEnd( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ), + &pxCurrentTCBs[ xCoreID ]->xStateListItem ); + } + while( xTaskScheduled == pdFALSE ) { #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) @@ -970,10 +988,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( xTaskScheduled != pdFALSE ) { - /* Once a task has been selected to run on this core, - * move it to the end of the ready task list. */ - uxListRemove( pxIterator ); - vListInsertEnd( pxReadyList, pxIterator ); + /* A task has been selected to run on this core. */ break; } } From 87aa63133e79f65d879a89cba3276d2dd770d2d0 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 6 Jan 2023 17:28:10 +0800 Subject: [PATCH 134/164] Move xTaskIncrementTick critical section to port (#55) * Port should use taskENTER/EXIT_CRITICAL_FROM_ISR --- tasks.c | 346 +++++++++++++++++++++++++++----------------------------- 1 file changed, 169 insertions(+), 177 deletions(-) diff --git a/tasks.c b/tasks.c index 96ddf0bc954..0abd93ccd3c 100644 --- a/tasks.c +++ b/tasks.c @@ -4081,244 +4081,236 @@ BaseType_t xTaskIncrementTick( void ) BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE }; #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */ - #if ( configNUMBER_OF_CORES > 1 ) - taskENTER_CRITICAL(); - #endif + /* Called by the portable layer each time a tick interrupt occurs. + * Increments the tick then checks to see if the new tick value will cause any + * tasks to be unblocked. */ + traceTASK_INCREMENT_TICK( xTickCount ); + + /* Tick increment should occur on every kernel timer event. 
Core 0 has the + * responsibility to increment the tick, or increment the pended ticks if the + * scheduler is suspended. If pended ticks is greater than zero, the core that + * calls xTaskResumeAll has the responsibility to increment the tick. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) { - /* Called by the portable layer each time a tick interrupt occurs. - * Increments the tick then checks to see if the new tick value will cause any - * tasks to be unblocked. */ - traceTASK_INCREMENT_TICK( xTickCount ); - - /* Tick increment should occur on every kernel timer event. Core 0 has the - * responsibility to increment the tick, or increment the pended ticks if the - * scheduler is suspended. If pended ticks is greater than zero, the core that - * calls xTaskResumeAll has the responsibility to increment the tick. */ - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) - { - /* Minor optimisation. The tick count cannot change in this - * block. */ - const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; + /* Minor optimisation. The tick count cannot change in this + * block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; - /* Increment the RTOS tick, switching the delayed and overflowed - * delayed lists if it wraps to 0. */ - xTickCount = xConstTickCount; + /* Increment the RTOS tick, switching the delayed and overflowed + * delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; - if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ - { - taskSWITCH_DELAYED_LISTS(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ + { + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - /* See if this tick has made a timeout expire. Tasks are stored in - * the queue in the order of their wake time - meaning once one task - * has been found whose block time has not expired there is no need to - * look any further down the list. */ - if( xConstTickCount >= xNextTaskUnblockTime ) + /* See if this tick has made a timeout expire. Tasks are stored in + * the queue in the order of their wake time - meaning once one task + * has been found whose block time has not expired there is no need to + * look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ; ; ) { - for( ; ; ) + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The delayed list is empty. Set xNextTaskUnblockTime + * to the maximum possible value so it is extremely + * unlikely that the + * if( xTickCount >= xNextTaskUnblockTime ) test will pass + * next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; + } + else { - if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + /* The delayed list is not empty, get the value of the + * item at the head of the delayed list. This is the time + * at which the task at the head of the delayed list must + * be removed from the Blocked state. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ + xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + + if( xConstTickCount < xItemValue ) { - /* The delayed list is empty. Set xNextTaskUnblockTime - * to the maximum possible value so it is extremely - * unlikely that the - * if( xTickCount >= xNextTaskUnblockTime ) test will pass - * next time through. */ - xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - break; + /* It is not time to unblock this item yet, but the + * item value is the time at which the task at the head + * of the blocked list must be removed from the Blocked + * state - so record the item value in + * xNextTaskUnblockTime. */ + xNextTaskUnblockTime = xItemValue; + break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ } else { - /* The delayed list is not empty, get the value of the - * item at the head of the delayed list. This is the time - * at which the task at the head of the delayed list must - * be removed from the Blocked state. */ - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); - - if( xConstTickCount < xItemValue ) - { - /* It is not time to unblock this item yet, but the - * item value is the time at which the task at the head - * of the blocked list must be removed from the Blocked - * state - so record the item value in - * xNextTaskUnblockTime. */ - xNextTaskUnblockTime = xItemValue; - break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* It is time to remove the item from the Blocked state. */ - listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); - - /* Is the task waiting on an event also? If so remove - * it from the event list. */ - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) - { - listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Place the unblocked task into the appropriate ready - * list. */ - prvAddTaskToReadyList( pxTCB ); - - /* A task being unblocked cannot cause an immediate - * context switch if preemption is turned off. */ - #if ( configUSE_PREEMPTION == 1 ) - { - #if ( configNUMBER_OF_CORES == 1 ) - { - /* Preemption is on, but a context switch should - * only be performed if the unblocked task's - * priority is higher than the currently executing - * task. - * The case of equal priority tasks sharing - * processing time (which happens when both - * preemption and time slicing are on) is - * handled below.*/ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #else /* #if( configNUMBER_OF_CORES == 1 ) */ - { - prvYieldForTask( pxTCB, pdTRUE ); - } - #endif /* #if( configNUMBER_OF_CORES == 1 ) */ - } - #endif /* #if ( configUSE_PREEMPTION == 1 ) */ + mtCOVERAGE_TEST_MARKER(); } - } - } - /* Tasks of equal priority to the currently running task will share - * processing time (time slice) if preemption is on, and the application - * writer has not explicitly turned time slicing off. 
*/ - #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) - { - #if ( configNUMBER_OF_CORES == 1 ) - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + /* It is time to remove the item from the Blocked state. */ + listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove + * it from the event list. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { - xSwitchRequired = pdTRUE; + listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); } else { mtCOVERAGE_TEST_MARKER(); } - } - #else /* #if ( configNUMBER_OF_CORES == 1 ) */ - { - for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUMBER_OF_CORES ); x++ ) + + /* Place the unblocked task into the appropriate ready + * list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate + * context switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { - xYieldRequiredForCore[ x ] = pdTRUE; + /* Preemption is on, but a context switch should + * only be performed if the unblocked task's + * priority is higher than the currently executing + * task. + * The case of equal priority tasks sharing + * processing time (which happens when both + * preemption and time slicing are on) is + * handled below.*/ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + #else /* #if( configNUMBER_OF_CORES == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + prvYieldForTask( pxTCB, pdTRUE ); } + #endif /* #if( configNUMBER_OF_CORES == 1 ) */ } + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } - #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } - #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + } - #if ( configUSE_TICK_HOOK == 1 ) + /* Tasks of equal priority to the currently running task will share + * processing time (time slice) if preemption is on, and the application + * writer has not explicitly turned time slicing off. */ + #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) + { + #if ( configNUMBER_OF_CORES == 1 ) { - /* Guard against the tick hook being called when the pended tick - * count is being unwound (when the scheduler is being unlocked). */ - if( xPendedTicks == ( TickType_t ) 0 ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { - vApplicationTickHook(); + xSwitchRequired = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } - #endif /* configUSE_TICK_HOOK */ - - #if ( configUSE_PREEMPTION == 1 ) + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - #if ( configNUMBER_OF_CORES == 1 ) + for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUMBER_OF_CORES ); x++ ) { - /* For single core the core ID is always 0. 
*/ - if( xYieldPendings[ 0 ] != pdFALSE ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) { - xSwitchRequired = pdTRUE; + xYieldRequiredForCore[ x ] = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } - #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + } + #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + + #if ( configUSE_TICK_HOOK == 1 ) + { + /* Guard against the tick hook being called when the pended tick + * count is being unwound (when the scheduler is being unlocked). */ + if( xPendedTicks == ( TickType_t ) 0 ) + { + vApplicationTickHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICK_HOOK */ + + #if ( configUSE_PREEMPTION == 1 ) + { + #if ( configNUMBER_OF_CORES == 1 ) + { + /* For single core the core ID is always 0. */ + if( xYieldPendings[ 0 ] != pdFALSE ) { - BaseType_t xCoreID; - xCoreID = portGET_CORE_ID(); + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + BaseType_t xCoreID; + xCoreID = portGET_CORE_ID(); - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUMBER_OF_CORES; x++ ) + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUMBER_OF_CORES; x++ ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) + #endif { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) - #endif + if( ( xYieldRequiredForCore[ x ] != pdFALSE ) || ( xYieldPendings[ x ] != pdFALSE ) ) { - if( ( xYieldRequiredForCore[ x ] != pdFALSE ) || ( xYieldPendings[ x ] != pdFALSE ) ) + if( x == ( UBaseType_t ) xCoreID ) { - if( x == ( UBaseType_t ) xCoreID ) - { - xSwitchRequired = pdTRUE; - } - else - { - prvYieldCore( x ); - } + xSwitchRequired = pdTRUE; } else { - mtCOVERAGE_TEST_MARKER(); + prvYieldCore( x ); } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } - #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } - #endif /* #if ( configUSE_PREEMPTION == 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } - else - { - ++xPendedTicks; + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ + } + else + { + ++xPendedTicks; - /* The tick hook gets called at regular intervals, even if the - * scheduler is locked. */ - #if ( configUSE_TICK_HOOK == 1 ) - { - vApplicationTickHook(); - } - #endif + /* The tick hook gets called at regular intervals, even if the + * scheduler is locked. 
*/ + #if ( configUSE_TICK_HOOK == 1 ) + { + vApplicationTickHook(); } + #endif } - #if ( configNUMBER_OF_CORES > 1 ) - taskEXIT_CRITICAL(); - #endif return xSwitchRequired; } From 57d201c53ebb26700d4ae805c7350d2991162746 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 6 Jan 2023 17:39:16 +0800 Subject: [PATCH 135/164] Not preempt equal priority task in the following functions (#56) Not to preempt equal priority task in the following functions * vTaskResume * vTaskResumeFromISR * vTaskPrioritySet * vTaskCoreAffinitySet --- tasks.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tasks.c b/tasks.c index 0abd93ccd3c..8346d64c3e5 100644 --- a/tasks.c +++ b/tasks.c @@ -2517,7 +2517,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } else if( xYieldForTask != pdFALSE ) { - prvYieldForTask( pxTCB, pdTRUE ); + prvYieldForTask( pxTCB, pdFALSE ); } else { @@ -2584,7 +2584,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * scheduled on any of those cores. */ if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U ) { - prvYieldForTask( pxTCB, pdTRUE ); + prvYieldForTask( pxTCB, pdFALSE ); } } #else /* #if( configUSE_PREEMPTION == 1 ) */ @@ -2950,7 +2950,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { #if ( configUSE_PREEMPTION == 1 ) { - prvYieldForTask( pxTCB, pdTRUE ); + prvYieldForTask( pxTCB, pdFALSE ); } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } @@ -3043,7 +3043,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) { - prvYieldForTask( pxTCB, pdTRUE ); + prvYieldForTask( pxTCB, pdFALSE ); if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { From 8b13d4b30bf1707daa9ff3a7dcb239fa9747c0c7 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Mon, 9 Jan 2023 12:41:25 +0800 Subject: [PATCH 136/164] Remove implicit test (#49) * Remove taskTASK_IS_RUNNING implicit test * Remove portCHECK_IF_IN_ISR implicit test * Fix taskVALID_CORE_ID implicit test * Remove configASSERT implicit test --- tasks.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tasks.c b/tasks.c index 8346d64c3e5..044d4151585 100644 --- a/tasks.c +++ b/tasks.c @@ -750,7 +750,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; static void prvYieldCore( BaseType_t xCoreID ) { /* This must be called from a critical section and xCoreID must be valid. 
*/ - if( portCHECK_IF_IN_ISR() && ( xCoreID == portGET_CORE_ID() ) ) + if( ( portCHECK_IF_IN_ISR() == pdTRUE ) && ( xCoreID == portGET_CORE_ID() ) ) { xYieldPendings[ xCoreID ] = pdTRUE; } @@ -857,7 +857,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } - if( ( xYieldCount == 0 ) && taskVALID_CORE_ID( xLowestPriorityCore ) ) + if( ( xYieldCount == 0 ) && ( taskVALID_CORE_ID( xLowestPriorityCore ) == pdTRUE ) ) { prvYieldCore( xLowestPriorityCore ); } @@ -1020,7 +1020,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( xTaskScheduled == pdTRUE ) { - configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ); + configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) == pdTRUE ); #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) { @@ -1105,7 +1105,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } - if( taskVALID_CORE_ID( xLowestPriorityCore ) ) + if( taskVALID_CORE_ID( xLowestPriorityCore ) == pdTRUE ) { prvYieldCore( xLowestPriorityCore ); } @@ -1976,7 +1976,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } /* Force a reschedule if the task that has just been deleted was running. */ - if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) ) ) + if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) ) { if( pxTCB->xTaskRunState == portGET_CORE_ID() ) { @@ -2236,7 +2236,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - if( taskTASK_IS_RUNNING( pxTCB ) ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { /* Is it actively running on a core? */ eReturn = eRunning; @@ -2400,7 +2400,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } - else if( taskTASK_IS_RUNNING( pxTCB ) ) + else if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { /* Setting the priority of a running task down means * there may now be another task of higher priority that @@ -2560,7 +2560,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, if( xSchedulerRunning != pdFALSE ) { - if( taskTASK_IS_RUNNING( pxTCB ) ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; @@ -2652,7 +2652,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, if( xSchedulerRunning != pdFALSE ) { - if( taskTASK_IS_RUNNING( pxTCB ) ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; prvYieldCore( xCoreID ); @@ -2791,7 +2791,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } - if( taskTASK_IS_RUNNING( pxTCB ) ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { if( xSchedulerRunning != pdFALSE ) { @@ -3500,7 +3500,7 @@ BaseType_t xTaskResumeAll( void ) /* If uxSchedulerSuspended is zero then this function does not match a * previous call to vTaskSuspendAll(). */ - configASSERT( uxSchedulerSuspended ); + configASSERT( uxSchedulerSuspended != 0U ); --uxSchedulerSuspended; portRELEASE_TASK_LOCK(); @@ -5462,7 +5462,7 @@ static void prvCheckTasksWaitingTermination( void ) * state is just set to whatever is passed in. 
*/ if( eState != eInvalid ) { - if( taskTASK_IS_RUNNING( pxTCB ) ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { pxTaskStatus->eCurrentState = eRunning; } From c785ebf55006114b13a8ed4512ad6d0ad78bebc2 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 12 Jan 2023 12:47:03 +0800 Subject: [PATCH 137/164] Fix preempt equal priority task in xTaskIncrementTick (#58) * Not preempt equal priority when a task is removed from delay list. Process time sharing is handle in the logic below. * Remove the xPreemptEqualPriority parameter of prvYieldForTask --- tasks.c | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/tasks.c b/tasks.c index 044d4151585..affe746aa47 100644 --- a/tasks.c +++ b/tasks.c @@ -489,8 +489,7 @@ static BaseType_t prvCreateIdleTasks( void ); * Yields a core, or cores if multiple priorities are not allowed to run * simultaneously, to allow the task pxTCB to run. */ - static void prvYieldForTask( TCB_t * pxTCB, - const BaseType_t xPreemptEqualPriority ); + static void prvYieldForTask( TCB_t * pxTCB ); #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ #if ( configNUMBER_OF_CORES > 1 ) @@ -774,8 +773,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) - static void prvYieldForTask( TCB_t * pxTCB, - const BaseType_t xPreemptEqualPriority ) + static void prvYieldForTask( TCB_t * pxTCB ) { BaseType_t xLowestPriorityToPreempt; BaseType_t xCurrentCoreTaskPriority; @@ -795,12 +793,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; { xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority; - if( xPreemptEqualPriority == pdFALSE ) - { - /* xLowestPriorityToPreempt will be decremented to -1 if the priority of pxTCB - * is 0. This is ok as we will give system idle tasks a priority of -1 below. */ - --xLowestPriorityToPreempt; - } + /* xLowestPriorityToPreempt will be decremented to -1 if the priority of pxTCB + * is 0. This is ok as we will give system idle tasks a priority of -1 below. */ + --xLowestPriorityToPreempt; for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) { @@ -1839,7 +1834,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * currently running task and preemption is on then it should * run now. */ #if ( configUSE_PREEMPTION == 1 ) - prvYieldForTask( pxNewTCB, pdFALSE ); + prvYieldForTask( pxNewTCB ); #endif } else @@ -2517,7 +2512,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } else if( xYieldForTask != pdFALSE ) { - prvYieldForTask( pxTCB, pdFALSE ); + prvYieldForTask( pxTCB ); } else { @@ -2584,7 +2579,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * scheduled on any of those cores. 
*/ if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U ) { - prvYieldForTask( pxTCB, pdFALSE ); + prvYieldForTask( pxTCB ); } } #else /* #if( configUSE_PREEMPTION == 1 ) */ @@ -2950,7 +2945,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { #if ( configUSE_PREEMPTION == 1 ) { - prvYieldForTask( pxTCB, pdFALSE ); + prvYieldForTask( pxTCB ); } #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } @@ -3043,7 +3038,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) { - prvYieldForTask( pxTCB, pdFALSE ); + prvYieldForTask( pxTCB ); if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { @@ -4049,7 +4044,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) { taskENTER_CRITICAL(); { - prvYieldForTask( pxTCB, pdFALSE ); + prvYieldForTask( pxTCB ); } taskEXIT_CRITICAL(); } @@ -4194,7 +4189,7 @@ BaseType_t xTaskIncrementTick( void ) } #else /* #if( configNUMBER_OF_CORES == 1 ) */ { - prvYieldForTask( pxTCB, pdTRUE ); + prvYieldForTask( pxTCB ); } #endif /* #if( configNUMBER_OF_CORES == 1 ) */ } @@ -4753,7 +4748,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) #if ( configUSE_PREEMPTION == 1 ) { - prvYieldForTask( pxUnblockedTCB, pdFALSE ); + prvYieldForTask( pxUnblockedTCB ); if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { @@ -4823,7 +4818,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, { taskENTER_CRITICAL(); { - prvYieldForTask( pxUnblockedTCB, pdFALSE ); + prvYieldForTask( pxUnblockedTCB ); } taskEXIT_CRITICAL(); } @@ -6925,7 +6920,7 @@ TickType_t uxTaskResetEventItemValue( void ) { #if ( configUSE_PREEMPTION == 1 ) { - prvYieldForTask( pxTCB, pdFALSE ); + prvYieldForTask( pxTCB ); } #endif } @@ -7080,7 +7075,7 @@ TickType_t uxTaskResetEventItemValue( void ) { #if ( configUSE_PREEMPTION == 1 ) { - prvYieldForTask( pxTCB, pdFALSE ); + prvYieldForTask( pxTCB ); if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) { @@ -7191,7 +7186,7 @@ TickType_t uxTaskResetEventItemValue( void ) { #if ( configUSE_PREEMPTION == 1 ) { - prvYieldForTask( pxTCB, pdFALSE ); + prvYieldForTask( pxTCB ); if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) { From 161a2c7d5c4c9cf52bacdb2f45daf00a4b7d6a72 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 24 Jan 2023 16:23:20 +0800 Subject: [PATCH 138/164] Remove prvSelectHighestPriorityTask call in vTaskSuspend (#59) * Every core starts with an idle task in SMP implementation and taskTASK_IS_RUNNING only return ture when the task is idle task before scheduler started. So prvSelectHighestPriorityTask won't be called in vTaskSuspend before scheduler started. * Update prvSelectHighestPriorityTask to ensure that this function is called only when scheduler started. --- tasks.c | 63 +++++++++++++++++---------------------------------------- 1 file changed, 19 insertions(+), 44 deletions(-) diff --git a/tasks.c b/tasks.c index affe746aa47..91c0e6f937b 100644 --- a/tasks.c +++ b/tasks.c @@ -497,7 +497,7 @@ static BaseType_t prvCreateIdleTasks( void ); /* * Selects the highest priority available task for the given core. 
*/ - static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ); + static void prvSelectHighestPriorityTask( BaseType_t xCoreID ); #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /** @@ -872,7 +872,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) - static BaseType_t prvSelectHighestPriorityTask( BaseType_t xCoreID ) + static void prvSelectHighestPriorityTask( BaseType_t xCoreID ) { UBaseType_t uxCurrentPriority = uxTopReadyPriority; BaseType_t xTaskScheduled = pdFALSE; @@ -885,6 +885,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xPriorityDropped = pdFALSE; #endif + /* This function should be called when scheduler is running. */ + configASSERT( xSchedulerRunning == pdTRUE ); + /* A new task is created and a running task with the same priority yields * itself to run the new task. When a running task yields itself, it is still * in the ready list. This running task will be selected before the new task @@ -1001,14 +1004,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } - /* This function can get called by vTaskSuspend() before the scheduler is started. - * In that case, since the idle tasks have not yet been created it is possible that we - * won't find a new task to schedule. Return pdFALSE in this case. */ - if( ( xSchedulerRunning == pdFALSE ) && ( uxCurrentPriority == tskIDLE_PRIORITY ) && ( xTaskScheduled == pdFALSE ) ) - { - break; - } - + /* There are configNUMBER_OF_CORES Idle tasks created when scheduler started. + * The scheduler should be able to select a task to run when uxCurrentPriority + * is tskIDLE_PRIORITY. */ configASSERT( ( uxCurrentPriority > tskIDLE_PRIORITY ) || ( xTaskScheduled == pdTRUE ) ); uxCurrentPriority--; } @@ -1108,8 +1106,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */ } - - return xTaskScheduled; } #endif /* ( configNUMBER_OF_CORES > 1 ) */ @@ -2800,45 +2796,24 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { prvYieldCore( xTaskRunningOnCore ); } - - taskEXIT_CRITICAL(); } else { - taskEXIT_CRITICAL(); - - configASSERT( pxTCB == pxCurrentTCBs[ xTaskRunningOnCore ] ); - - /* The scheduler is not running, but the task that was pointed - * to by pxCurrentTCB has just been suspended and pxCurrentTCB - * must be adjusted to point to a different task. */ - if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ - { - /* No other tasks are ready, so set the core's TCB back to - * NULL so when the next task is created the core's TCB will - * be able to be set to point to it no matter what its relative - * priority is. */ - pxTCB->xTaskRunState = taskTASK_NOT_RUNNING; - pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; - } - else - { - /* Attempt to switch in a new task. This could fail since the idle tasks - * haven't been created yet. If it does then set the core's TCB back to - * NULL. */ - if( prvSelectHighestPriorityTask( xTaskRunningOnCore ) == pdFALSE ) - { - pxTCB->xTaskRunState = taskTASK_NOT_RUNNING; - pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; - } - } + /* This code path is not possible because only Idle tasks are + * assigned a core before the scheduler is started ( i.e. 
+ * taskTASK_IS_RUNNING is only true for idle tasks before + * the scheduler is started ) and idle tasks cannot be + * suspended. */ + mtCOVERAGE_TEST_MARKER(); } } else { - taskEXIT_CRITICAL(); + mtCOVERAGE_TEST_MARKER(); } - } /* taskEXIT_CRITICAL() - already exited in one of three cases above. */ + + taskEXIT_CRITICAL(); + } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } @@ -4565,7 +4540,7 @@ BaseType_t xTaskIncrementTick( void ) #endif /* Select a new task to run. */ - ( void ) prvSelectHighestPriorityTask( xCoreID ); + prvSelectHighestPriorityTask( xCoreID ); traceTASK_SWITCHED_IN(); /* After the new task is switched in, update the global errno. */ From b266846c84a18491a3f186d76a71eacacfe567d8 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 17 Feb 2023 18:11:54 +0800 Subject: [PATCH 139/164] Adding portIDLE_TASK_TEST_MOCK in idle task function (#66) * Adding configIDLE_TASK_HOOK in idle task function --- include/FreeRTOS.h | 4 ++++ tasks.c | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 47cf0a47a11..9a774269f57 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1268,6 +1268,10 @@ #define configRUN_ADDITIONAL_TESTS 0 #endif +#ifndef configIDLE_TASK_HOOK + #define configIDLE_TASK_HOOK() +#endif + /* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using * dynamically allocated RAM, in which case when any task is deleted it is known * that both the task's stack and TCB need to be freed. Sometimes the diff --git a/tasks.c b/tasks.c index 91c0e6f937b..04575d5be94 100644 --- a/tasks.c +++ b/tasks.c @@ -5007,6 +5007,11 @@ void vTaskMissedYield( void ) vApplicationMinimalIdleHook(); } #endif /* configUSE_MINIMAL_IDLE_HOOK */ + + /* Code below here allows additional code to be inserted into idle task + * function, especially for loop controlling (for example when performing + * unit tests). */ + configIDLE_TASK_HOOK(); } } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ @@ -5160,6 +5165,11 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) vApplicationMinimalIdleHook(); } #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) */ + + /* Code below here allows additional code to be inserted into idle task + * function, especially for loop controlling (for example when performing + * unit tests). */ + configIDLE_TASK_HOOK(); } } /*-----------------------------------------------------------*/ From b051d241e46ca1fdb5fe6e86c3170464588bc32d Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 24 Feb 2023 12:42:36 +0800 Subject: [PATCH 140/164] Add INFINITE_LOOP macro to test idle task function (#67) * Remove configIDLE_TASK_HOOK * Add INFINIT_LOOP. Unit test can redefine this macro to mock the function. --- include/FreeRTOS.h | 4 ---- tasks.c | 20 ++++++++------------ 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 9a774269f57..47cf0a47a11 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1268,10 +1268,6 @@ #define configRUN_ADDITIONAL_TESTS 0 #endif -#ifndef configIDLE_TASK_HOOK - #define configIDLE_TASK_HOOK() -#endif - /* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using * dynamically allocated RAM, in which case when any task is deleted it is known * that both the task's stack and TCB need to be freed. 
Sometimes the diff --git a/tasks.c b/tasks.c index 04575d5be94..5ee298848b9 100644 --- a/tasks.c +++ b/tasks.c @@ -287,6 +287,12 @@ typedef BaseType_t TaskRunning_t; #define portDECREMENT_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting-- ) #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */ +/* Code below here allows infinite loop controlling, especially for the infinite loop + * in idle task function (for example when performing unit tests). */ +#ifndef INFINITE_LOOP + #define INFINITE_LOOP() 1 +#endif + /* * Task control block. A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -4957,7 +4963,7 @@ void vTaskMissedYield( void ) taskYIELD(); - for( ; ; ) + while( INFINITE_LOOP() ) { #if ( configUSE_PREEMPTION == 0 ) { @@ -5007,11 +5013,6 @@ void vTaskMissedYield( void ) vApplicationMinimalIdleHook(); } #endif /* configUSE_MINIMAL_IDLE_HOOK */ - - /* Code below here allows additional code to be inserted into idle task - * function, especially for loop controlling (for example when performing - * unit tests). */ - configIDLE_TASK_HOOK(); } } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ @@ -5048,7 +5049,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ - for( ; ; ) + while( INFINITE_LOOP() ) { /* See if any tasks have deleted themselves - if so then the idle task * is responsible for freeing the deleted task's TCB and stack. */ @@ -5165,11 +5166,6 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) vApplicationMinimalIdleHook(); } #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) */ - - /* Code below here allows additional code to be inserted into idle task - * function, especially for loop controlling (for example when performing - * unit tests). */ - configIDLE_TASK_HOOK(); } } /*-----------------------------------------------------------*/ From 350c5c3e0abc848df633148114a4c8ab8366b9b4 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 13 Apr 2023 12:34:37 +0800 Subject: [PATCH 141/164] portYield is not called when exit critical section from ISR (#60) * Reference SMP branch --- tasks.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tasks.c b/tasks.c index 5ee298848b9..fe666eed75e 100644 --- a/tasks.c +++ b/tasks.c @@ -6278,8 +6278,6 @@ static void prvResetNextTaskUnblockTime( void ) void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) { - BaseType_t xYieldCurrentTask; - if( xSchedulerRunning != pdFALSE ) { /* If critical nesting count is zero then this function @@ -6292,20 +6290,8 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { - /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; - portRELEASE_ISR_LOCK(); portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); - - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. 
*/ - if( xYieldCurrentTask != pdFALSE ) - { - portYIELD(); - } } else { From 39d709e8a12f0f6b9da76b338d323c3132348ea4 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 13 Apr 2023 12:53:41 +0800 Subject: [PATCH 142/164] Fix list index is moved in prvSearchForNameWithinSingleList (#61) * index pointer should not be moved in SMP --- tasks.c | 158 +++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 112 insertions(+), 46 deletions(-) diff --git a/tasks.c b/tasks.c index fe666eed75e..09ab0d0399a 100644 --- a/tasks.c +++ b/tasks.c @@ -3666,71 +3666,137 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char #if ( INCLUDE_xTaskGetHandle == 1 ) - static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, - const char pcNameToQuery[] ) - { - TCB_t * pxNextTCB; - TCB_t * pxFirstTCB; - TCB_t * pxReturn = NULL; - UBaseType_t x; - char cNextChar; - BaseType_t xBreakLoop; - - /* This function is called with the scheduler suspended. */ - - if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + #if ( configNUMBER_OF_CORES == 1 ) + static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, + const char pcNameToQuery[] ) { - listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + TCB_t * pxNextTCB; + TCB_t * pxFirstTCB; + TCB_t * pxReturn = NULL; + UBaseType_t x; + char cNextChar; + BaseType_t xBreakLoop; - do - { - listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + /* This function is called with the scheduler suspended. */ - /* Check each character in the name looking for a match or - * mismatch. */ - xBreakLoop = pdFALSE; + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + do { - cNextChar = pxNextTCB->pcTaskName[ x ]; + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - if( cNextChar != pcNameToQuery[ x ] ) + /* Check each character in the name looking for a match or + * mismatch. */ + xBreakLoop = pdFALSE; + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) { - /* Characters didn't match. */ - xBreakLoop = pdTRUE; + cNextChar = pxNextTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + xBreakLoop = pdTRUE; + } + else if( cNextChar == ( char ) 0x00 ) + { + /* Both strings terminated, a match must have been + * found. */ + pxReturn = pxNextTCB; + xBreakLoop = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xBreakLoop != pdFALSE ) + { + break; + } } - else if( cNextChar == ( char ) 0x00 ) + + if( pxReturn != NULL ) { - /* Both strings terminated, a match must have been - * found. */ - pxReturn = pxNextTCB; - xBreakLoop = pdTRUE; + /* The handle has been found. 
*/ + break; } - else + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return pxReturn; + } + #else + static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, + const char pcNameToQuery[] ) + { + TCB_t * pxReturn = NULL; + UBaseType_t x; + char cNextChar; + BaseType_t xBreakLoop; + const ListItem_t * pxEndMarker = listGET_END_MARKER( pxList ); + ListItem_t * pxIterator; + + /* This function is called with the scheduler suspended. */ + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + for( pxIterator = listGET_HEAD_ENTRY( pxList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) ) + { + TCB_t * pxTCB = listGET_LIST_ITEM_OWNER( pxIterator ); + + /* Check each character in the name looking for a match or + * mismatch. */ + xBreakLoop = pdFALSE; + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) { - mtCOVERAGE_TEST_MARKER(); + cNextChar = pxTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + xBreakLoop = pdTRUE; + } + else if( cNextChar == ( char ) 0x00 ) + { + /* Both strings terminated, a match must have been + * found. */ + pxReturn = pxTCB; + xBreakLoop = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xBreakLoop != pdFALSE ) + { + break; + } } - if( xBreakLoop != pdFALSE ) + if( pxReturn != NULL ) { + /* The handle has been found. */ break; } } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - if( pxReturn != NULL ) - { - /* The handle has been found. */ - break; - } - } while( pxNextTCB != pxFirstTCB ); + return pxReturn; } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - return pxReturn; - } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ #endif /* INCLUDE_xTaskGetHandle */ /*-----------------------------------------------------------*/ From d1c625edaeec0012a6048e7d3dce6c417989c1e1 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 13 Apr 2023 13:27:58 +0800 Subject: [PATCH 143/164] Yield for priority inherit and disinherit (#64) * Yield the core runs the task with prority changed when priority inheritance and disinheritance. --- tasks.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tasks.c b/tasks.c index 09ab0d0399a..6fa4ed27c05 100644 --- a/tasks.c +++ b/tasks.c @@ -5889,6 +5889,16 @@ static void prvResetNextTaskUnblockTime( void ) /* Inherit the priority before being moved into the new list. */ pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; prvAddTaskToReadyList( pxMutexHolderTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* The priority of the task is raised. Yield for this task + * if it is not running. */ + if( taskTASK_IS_RUNNING( pxMutexHolderTCB ) != pdTRUE ) + { + prvYieldForTask( pxMutexHolderTCB ); + } + } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ } else { @@ -5979,6 +5989,16 @@ static void prvResetNextTaskUnblockTime( void ) * running to give back the mutex. */ listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ prvAddTaskToReadyList( pxTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* The priority of the task is dropped. Yield the core on + * which the task is running. 
*/ + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + prvYieldCore( pxTCB->xTaskRunState ); + } + } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ /* Return true to indicate that a context switch is required. * This is only actually required in the corner case whereby @@ -6092,6 +6112,16 @@ static void prvResetNextTaskUnblockTime( void ) } prvAddTaskToReadyList( pxTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* The priority of the task is dropped. Yield the core on + * which the task is running. */ + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + prvYieldCore( pxTCB->xTaskRunState ); + } + } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ } else { From 6ce84d8b2a4b03d097d61c7815dbdfa578af7d1b Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 13 Apr 2023 16:35:11 +0800 Subject: [PATCH 144/164] fix performance counting for SMP (#65) * performance counting: ulTaskSwitchedInTime and ulTotalRunTime must be (#618) arrays, index is core number --------- Co-authored-by: Hardy Griech --- tasks.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tasks.c b/tasks.c index 6fa4ed27c05..2d761e34375 100644 --- a/tasks.c +++ b/tasks.c @@ -456,8 +456,8 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ - PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUM_CORES ] = { 0UL }; /*< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUM_CORES ] = { 0UL }; /*< Holds the total amount of execution time as defined by the run time counter clock. */ #endif @@ -4576,9 +4576,9 @@ BaseType_t xTaskIncrementTick( void ) #if ( configGENERATE_RUN_TIME_STATS == 1 ) { #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ xCoreID ] ); #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalRunTime[ xCoreID ] = portGET_RUN_TIME_COUNTER_VALUE(); #endif /* Add the amount of time the task has been running to the @@ -4588,16 +4588,16 @@ BaseType_t xTaskIncrementTick( void ) * overflows. The guard against negative values is to protect * against suspect run time stat counter implementations - which * are provided by the application, not the kernel. 
*/ - if( ulTotalRunTime > ulTaskSwitchedInTime ) + if( ulTotalRunTime[ xCoreID ] > ulTaskSwitchedInTime[ xCoreID ] ) { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ xCoreID ] - ulTaskSwitchedInTime[ xCoreID ] ); } else { mtCOVERAGE_TEST_MARKER(); } - ulTaskSwitchedInTime = ulTotalRunTime; + ulTaskSwitchedInTime[ xCoreID ] = ulTotalRunTime[ xCoreID ]; } #endif /* configGENERATE_RUN_TIME_STATS */ From 4fed100bbc5c3bde188c716f52a998133c31ba72 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 13 Apr 2023 16:35:47 +0800 Subject: [PATCH 145/164] Remomve unreachable assert in prvCheckForRunStateChange (#68) * Previous assert already ensure this assert won't be triggered --- tasks.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tasks.c b/tasks.c index 2d761e34375..310fd6fb9d8 100644 --- a/tasks.c +++ b/tasks.c @@ -741,7 +741,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( uxPrevCriticalNesting == 0U ) { /* uxPrevSchedulerSuspended must be 1. */ - configASSERT( uxPrevSchedulerSuspended != ( UBaseType_t ) pdFALSE ); portRELEASE_ISR_LOCK(); } } From 8c067d612760a25f526f5888410de613bd2aaf53 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 13 Apr 2023 16:36:45 +0800 Subject: [PATCH 146/164] Remove unreachable code in preYieldForTask (#69) * xLowestPriorityCore index can't be greater than configNUMBER_OF_CORES --- tasks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 310fd6fb9d8..bf226b15cda 100644 --- a/tasks.c +++ b/tasks.c @@ -857,7 +857,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } - if( ( xYieldCount == 0 ) && ( taskVALID_CORE_ID( xLowestPriorityCore ) == pdTRUE ) ) + if( ( xYieldCount == 0 ) && ( xLowestPriorityCore >= 0 ) ) { prvYieldCore( xLowestPriorityCore ); } From a286632c6980e6c4e898ab9e11de56b2784c9d48 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:18:42 +0800 Subject: [PATCH 147/164] Add first version of XCOREAI port (#63) * xTaskIncrementTick need to be called in critical section * Rename configNUM_CORES to configNUMBER_OF_CORES * Define portENTER/EXIT_CRITICAL_FROM_ISR for SMP * portSET/CLEAR_INTERRUPT_MASK_FROM_ISR is not used in SMP --- portable/ThirdParty/xClang/XCOREAI/port.c | 254 ++++++++++++++++++ portable/ThirdParty/xClang/XCOREAI/port.xc | 26 ++ portable/ThirdParty/xClang/XCOREAI/portasm.S | 189 +++++++++++++ .../ThirdParty/xClang/XCOREAI/portmacro.h | 213 +++++++++++++++ .../xClang/XCOREAI/rtos_support_rtos_config.h | 95 +++++++ 5 files changed, 777 insertions(+) create mode 100644 portable/ThirdParty/xClang/XCOREAI/port.c create mode 100644 portable/ThirdParty/xClang/XCOREAI/port.xc create mode 100644 portable/ThirdParty/xClang/XCOREAI/portasm.S create mode 100644 portable/ThirdParty/xClang/XCOREAI/portmacro.h create mode 100644 portable/ThirdParty/xClang/XCOREAI/rtos_support_rtos_config.h diff --git a/portable/ThirdParty/xClang/XCOREAI/port.c b/portable/ThirdParty/xClang/XCOREAI/port.c new file mode 100644 index 00000000000..0d730738304 --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/port.c @@ -0,0 +1,254 @@ +// Copyright (c) 2019, XMOS Ltd, All rights reserved + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include +#include +#include +#include + +static hwtimer_t xKernelTimer; + +uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ] = { pdFALSE }; + +/*-----------------------------------------------------------*/ + +void vIntercoreInterruptISR( void ) +{ + int xCoreID; + +// debug_printf( "In KCALL: %u\n", ulData ); + xCoreID = rtos_core_id_get(); + ulPortYieldRequired[ xCoreID ] = pdTRUE; +} +/*-----------------------------------------------------------*/ + +DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData ) +{ + uint32_t ulLastTrigger; + uint32_t ulNow; + int xCoreID; + UBaseType_t uxSavedInterruptStatus; + + xCoreID = 0; + + configASSERT( xCoreID == rtos_core_id_get() ); + + /* Need the next interrupt to be scheduled relative to + * the current trigger time, rather than the current + * time. */ + ulLastTrigger = hwtimer_get_trigger_time( xKernelTimer ); + + /* Check to see if the ISR is late. If it is, we don't + * want to schedule the next interrupt to be in the past. */ + ulNow = hwtimer_get_time( xKernelTimer ); + if( ulNow - ulLastTrigger >= configCPU_CLOCK_HZ / configTICK_RATE_HZ ) + { + ulLastTrigger = ulNow; + } + + ulLastTrigger += configCPU_CLOCK_HZ / configTICK_RATE_HZ; + hwtimer_change_trigger_time( xKernelTimer, ulLastTrigger ); + +#if configUPDATE_RTOS_TIME_FROM_TICK_ISR == 1 + rtos_time_increment( RTOS_TICK_PERIOD( configTICK_RATE_HZ ) ); +#endif + + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + if( xTaskIncrementTick() != pdFALSE ) + { + ulPortYieldRequired[ xCoreID ] = pdTRUE; + } + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); +} +/*-----------------------------------------------------------*/ + +void vPortYieldOtherCore( int xOtherCoreID ) +{ + int xCoreID; + + /* + * This function must be called from within a critical section. + */ + + xCoreID = rtos_core_id_get(); + +// debug_printf("%d->%d\n", xCoreID, xOtherCoreID); + +// debug_printf("Yield core %d from %d\n", xOtherCoreID, xCoreID ); + + rtos_irq( xOtherCoreID, xCoreID ); +} +/*-----------------------------------------------------------*/ + +static int prvCoreInit( void ) +{ + int xCoreID; + + xCoreID = rtos_core_register(); + debug_printf( "Logical Core %d initializing as FreeRTOS Core %d\n", get_logical_core_id(), xCoreID ); + + asm volatile ( + "ldap r11, kexcept\n\t" + "set kep, r11\n\t" + : + : + : "r11" + ); + + rtos_irq_enable( configNUMBER_OF_CORES ); + + /* + * All threads wait here until all have enabled IRQs + */ + while( rtos_irq_ready() == pdFALSE ); + + if( xCoreID == 0 ) + { + uint32_t ulNow; + ulNow = hwtimer_get_time( xKernelTimer ); +// debug_printf( "The time is now (%u)\n", ulNow ); + + ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ; + + triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) ); + hwtimer_set_trigger_time( xKernelTimer, ulNow ); + triggerable_enable_trigger( xKernelTimer ); + } + + return xCoreID; +} +/*-----------------------------------------------------------*/ + +DEFINE_RTOS_KERNEL_ENTRY( void, vPortStartSchedulerOnCore, void ) +{ + int xCoreID; + + xCoreID = prvCoreInit(); + + #if( configUSE_CORE_INIT_HOOK == 1 ) + { + extern void vApplicationCoreInitHook( BaseType_t xCoreID ); + + vApplicationCoreInitHook( xCoreID ); + } + #endif + + debug_printf( "FreeRTOS Core %d initialized\n", xCoreID ); + + /* + * Restore the context of the first thread + * to run and jump into it. 
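+     * The inline assembly below sets up the register contract that
+     * _freertos_restore_ctx in portasm.S relies on: r6 holds this core's
+     * FreeRTOS core ID and r5 holds the base of pxCurrentTCBs, so the
+     * restore code can index the per-core TCB with ldw r0, r5[r6].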
+ */ + asm volatile ( + "mov r6, %0\n\t" /* R6 must be the FreeRTOS core ID*/ + "ldaw r5, dp[pxCurrentTCBs]\n\t" /* R5 must be the TCB list which is indexed by R6 */ + "bu _freertos_restore_ctx\n\t" + : /* no outputs */ + : "r"(xCoreID) + : "r5", "r6" + ); +} +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* Public functions required by all ports below: */ +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) +{ + //debug_printf( "Top of stack was %p for task %p\n", pxTopOfStack, pxCode ); + /* + * Grow the thread's stack by portTHREAD_CONTEXT_STACK_GROWTH + * so we can push the context onto it. + */ + pxTopOfStack -= portTHREAD_CONTEXT_STACK_GROWTH; + + uint32_t dp; + uint32_t cp; + + /* + * We need to get the current CP and DP pointers. + */ + asm volatile ( + "ldaw r11, cp[0]\n\t" /* get CP into R11 */ + "mov %0, r11\n\t" /* get R11 (CP) into cp */ + "ldaw r11, dp[0]\n\t" /* get DP into R11 */ + "mov %1, r11\n\t" /* get R11 (DP) into dp */ + : "=r"(cp), "=r"(dp) /* output 0 is cp, output 1 is dp */ + : /* there are no inputs */ + : "r11" /* R11 gets clobbered */ + ); + + /* + * Push the thread context onto the stack. + * Saved PC will point to the new thread's + * entry pointer. + * Interrupts will default to enabled. + * KEDI is also set to enable dual issue mode + * upon kernel entry. + */ + pxTopOfStack[ 1 ] = ( StackType_t ) pxCode; /* SP[1] := SPC */ + pxTopOfStack[ 2 ] = XS1_SR_IEBLE_MASK + | XS1_SR_KEDI_MASK; /* SP[2] := SSR */ + pxTopOfStack[ 3 ] = 0x00000000; /* SP[3] := SED */ + pxTopOfStack[ 4 ] = 0x00000000; /* SP[4] := ET */ + pxTopOfStack[ 5 ] = dp; /* SP[5] := DP */ + pxTopOfStack[ 6 ] = cp; /* SP[6] := CP */ + pxTopOfStack[ 7 ] = 0x00000000; /* SP[7] := LR */ + pxTopOfStack[ 8 ] = ( StackType_t ) pvParameters; /* SP[8] := R0 */ + pxTopOfStack[ 9 ] = 0x01010101; /* SP[9] := R1 */ + pxTopOfStack[ 10 ] = 0x02020202; /* SP[10] := R2 */ + pxTopOfStack[ 11 ] = 0x03030303; /* SP[11] := R3 */ + pxTopOfStack[ 12 ] = 0x04040404; /* SP[12] := R4 */ + pxTopOfStack[ 13 ] = 0x05050505; /* SP[13] := R5 */ + pxTopOfStack[ 14 ] = 0x06060606; /* SP[14] := R6 */ + pxTopOfStack[ 15 ] = 0x07070707; /* SP[15] := R7 */ + pxTopOfStack[ 16 ] = 0x08080808; /* SP[16] := R8 */ + pxTopOfStack[ 17 ] = 0x09090909; /* SP[17] := R9 */ + pxTopOfStack[ 18 ] = 0x10101010; /* SP[18] := R10 */ + pxTopOfStack[ 19 ] = 0x11111111; /* SP[19] := R11 */ + pxTopOfStack[ 20 ] = 0x00000000; /* SP[20] := vH and vSR */ + memset(&pxTopOfStack[21], 0, 32); /* SP[21 - 28] := vR */ + memset(&pxTopOfStack[29], 1, 32); /* SP[29 - 36] := vD */ + memset(&pxTopOfStack[37], 2, 32); /* SP[37 - 44] := vC */ + + //debug_printf( "Top of stack is now %p for task %p\n", pxTopOfStack, pxCode ); + + /* + * Returns the new top of the stack + */ + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +void vPortStartSMPScheduler( void ); + +/* + * See header file for description. 
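+ * In this port the function validates configNUMBER_OF_CORES against
+ * portMAX_CORE_COUNT, initialises the RTOS locks and the kernel timer,
+ * and then starts one scheduler instance per core through
+ * vPortStartSMPScheduler() (the par loop in port.xc).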
+ */ +BaseType_t xPortStartScheduler( void ) +{ + if( ( configNUMBER_OF_CORES > portMAX_CORE_COUNT ) || ( configNUMBER_OF_CORES <= 0 ) ) + { + return pdFAIL; + } + + rtos_locks_initialize(); + xKernelTimer = hwtimer_alloc(); + + vPortStartSMPScheduler(); + + return pdPASS; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) +{ + /* Do not implement. */ +} +/*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/xClang/XCOREAI/port.xc b/portable/ThirdParty/xClang/XCOREAI/port.xc new file mode 100644 index 00000000000..926b15086c9 --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/port.xc @@ -0,0 +1,26 @@ +/* + * port.xc + * + * Created on: Jul 31, 2019 + * Author: mbruno + */ + +//#include "rtos_support.h" + +extern "C" { + +#include "FreeRTOSConfig.h" /* to get configNUMBER_OF_CORES */ +#ifndef configNUMBER_OF_CORES +#define configNUMBER_OF_CORES 1 +#endif + +void __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(void); + +} /* extern "C" */ + +void vPortStartSMPScheduler( void ) +{ + par (int i = 0; i < configNUMBER_OF_CORES; i++) { + __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(); + } +} diff --git a/portable/ThirdParty/xClang/XCOREAI/portasm.S b/portable/ThirdParty/xClang/XCOREAI/portasm.S new file mode 100644 index 00000000000..702e9a2f021 --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/portasm.S @@ -0,0 +1,189 @@ +// Copyright (c) 2020, XMOS Ltd, All rights reserved + +#include "rtos_support_rtos_config.h" + +/* The FreeRTOS interrupt code calls vTaskSwitchContext. +Therfore it must be added to the rtos_isr group with the +rest of the ISR callback functions. */ +.weak _fptrgroup.rtos_isr.nstackwords.group +.add_to_set _fptrgroup.rtos_isr.nstackwords.group, vTaskSwitchContext.nstackwords, vTaskSwitchContext + +.globl kexcept +.align 128 /* align the kernel section to 128 bytes */ +.type kexcept,@function +.issue_mode dual +.cc_top kexcept.function, kexcept +kexcept: + ldc r11, 0x0008 + shl r11, r11, 16 + ldc r9, 0x0080 + or r11, r11, r9 + bau r11 //_TrapHandler is at 0x00080080. TODO: Is it always? Why can't I access the symbol _TrapHandler? + +_yield: + {set sp, r4 /* Restore the task's SP to save the rest of its context. */ + get r11, id} /* Get the logical core ID into r11. */ + ldaw r0, dp[rtos_core_map] + ldw r0, r0[r11] /* Translate to the RTOS core ID into r0 */ + bu _yield_continue /* Skip the ulPortYieldRequired check and jump right to */ + /* the context save and switch. Also skips saving SPC */ + /* since the kcall handler has already saved it. */ + +.align 64 +kcall: + /* start saving the thread's context */ + extsp RTOS_SUPPORT_INTERRUPT_STACK_GROWTH + stw r1, sp[9] + stw r11, sp[19] + + /* kcall sets SPC to the instruction of the kcall rather than the next instruction */ + /* so we need to adjust the SPC value that we save to the stack: */ + stw spc, sp[1] /* save the saved program counter onto the stack... */ + ldw r1, sp[1] /* so that we can load it into r1 (which we have already saved). */ + add r1, r1, 4 /* Add 4 to the spc to make it point to the instruction after the kcall. */ + {stw r1, sp[1] /* Now save it to the stack. */ + + /* kcall uses the same common function as interrupt callbacks. */ + /* tell it to call _yield above. 
*/ + ldap r11, _yield} + mov r1, r11 + + /* fall into rtos_interrupt_callback_common */ + +.globl rtos_interrupt_callback_common +rtos_interrupt_callback_common: + /* This is the body of the RTOS _xcore_c_interrupt_callback_XXX functions. */ + /* r1 = interrupt_callback_t function */ + + /* Save the thread's context onto the thread's stack. */ + /* The stack was extended for this by the wrapper function. */ + /* Begin only by saving some registers. The rest will be saved */ + /* later if vTaskSwitchContext() needs to be called. */ + /* DP and CP need to be saved because these are restored for the kernel ISR. */ + /* LR needs to be saved because it is clobbered when calling the callback. */ + /* r0-r3, and r11 need to be saved because the callback may clobber them. */ + /* r4 is saved because it is used here to hold the task SP. */ + + stw dp, sp[5] + stw cp, sp[6] + stw lr, sp[7] + stw r0, sp[8] +/*stw r1, sp[9] already saved by the wrapper function. */ + stw r2, sp[10] + stw r3, sp[11] + {stw r4, sp[12] +/*stw r11, sp[19] already saved by the wrapper function. */ + + ldaw r4, sp[0]} /* Get value of current stackpointer into r4. */ + + {kentsp 0 /* switch to the kernel stack. */ + /* The value 0 is safe to use since we don't need the SP */ + /* that it saves to KSP[0]. We already have it in r4. */ + + get r11, ed} /* Get the event data... */ + ldw dp, sp[3] /* (Restore CP and DP required for the RTOS ISR */ + ldw cp, sp[4] /* in case the active thread has modified them.) */ + {mov r0, r11 /* ...into the first argument for the callback function, */ + bla r1} /* and call the callback function. */ + + {set sp, r4 /* Restore the task's SP now. */ + + get r11, id} /* Get the logical core ID into r11. */ + ldaw r0, dp[rtos_core_map] + ldw r0, r0[r11] /* Translate to the RTOS core ID into r0. */ + ldaw r2, dp[ulPortYieldRequired] /* Get the yield required array into r2. */ + ldw r1, r2[r0] /* Is a yield required for this core? */ + {bf r1, _freertos_restore_ctx_partial /* If not, restore the context now. */ + ldc r1, 0} + stw r1, r2[r0] /* Otherwise, clear the yield required flag. */ + + /* Save the rest of the current task's context. */ + + /* Save standard xs2 regs */ + stw spc, sp[1] +_yield_continue: + stw ssr, sp[2] + stw sed, sp[3] + stw et, sp[4] + stw r5, sp[13] + stw r6, sp[14] + stw r7, sp[15] + stw r8, sp[16] + stw r9, sp[17] + stw r10, sp[18] +#if 1 + /* Save VPU status and headroom */ + vgetc r11 + {stw r11, sp[20] + /* Save VPU regs */ + ldaw r11, sp[21]} + {vstr r11[0] + ldaw r11, sp[29]} + {vstd r11[0] + ldaw r11, sp[37]} + vstc r11[0] +#endif + ldaw r5, dp[pxCurrentTCBs] /* Get the current TCB array into r5. */ + ldw r1, r5[r0] /* Get this core's current TCB pointer into r1. */ + stw r4, r1[0x0] /* Save the current task's SP to the first */ + /* word (top of stack) in the current TCB. */ + + {kentsp 0 /* switch back to the kernel stack. */ + + mov r6, r0} /* copy the RTOS core ID into r6 so we don't lose it. */ + ldap r11, vTaskSwitchContext + bla r11 /* Finally call vTaskSwitchContext(core_id) now that the task's */ + /* entire context is saved. Note the core id in r0 is the argument. */ + +//krestsp 0 /* unnecessary since KSP is already set and the SP */ + /* is being restored next from the current TCB. */ + +.globl _freertos_restore_ctx +_freertos_restore_ctx: + + ldw r0, r5[r6] /* get this core's current TCB pointer into r0 */ + ldw r0, r0[0x0] /* Get the top of the stack from the current TCB... */ + set sp, r0 /* into the stack pointer register. 
*/ + + /* Restore the current task's context */ +#if 1 + /* Restore VPU regs */ + ldaw r11, sp[37] + {vldc r11[0] + ldaw r11, sp[29]} + {vldd r11[0] + ldaw r11, sp[21]} + vldr r11[0] + /* Restore VPU status and headroom */ + ldw r11, sp[20] + vsetc r11 +#endif + /* Restore standard xs2 regs */ + ldw spc, sp[1] + ldw ssr, sp[2] + ldw sed, sp[3] + ldw et, sp[4] + ldw r5, sp[13] + ldw r6, sp[14] + ldw r7, sp[15] + ldw r8, sp[16] + ldw r9, sp[17] + ldw r10, sp[18] +_freertos_restore_ctx_partial: + ldw dp, sp[5] + ldw cp, sp[6] + ldw lr, sp[7] + ldw r0, sp[8] + ldw r1, sp[9] + ldw r2, sp[10] + ldw r3, sp[11] + ldw r4, sp[12] + {ldw r11, sp[19] + + /* shrink the stack by the size of the context just restored */ + ldaw sp, sp[RTOS_SUPPORT_INTERRUPT_STACK_GROWTH]} + + kret /* exit kernel mode and return to the thread */ + +.cc_bottom kexcept.function + diff --git a/portable/ThirdParty/xClang/XCOREAI/portmacro.h b/portable/ThirdParty/xClang/XCOREAI/portmacro.h new file mode 100644 index 00000000000..aef12cf3749 --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/portmacro.h @@ -0,0 +1,213 @@ +// Copyright (c) 2020, XMOS Ltd, All rights reserved + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifndef __ASSEMBLER__ + +/* Inclusion of xc1.h will result in clock being defined as a type. + * By default, FreeRTOS will require standard time.h, where clock is a function. + */ +#ifndef USE_XCORE_CLOCK_TYPE +#define _clock_defined +#endif + +#include +#include "rtos_support.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Type definitions. */ +#define portSTACK_TYPE uint32_t +typedef portSTACK_TYPE StackType_t; +typedef double portDOUBLE; +typedef int32_t BaseType_t; +typedef uint32_t UBaseType_t; + +#if( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + + /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 +#endif +/*-----------------------------------------------------------*/ + +#endif /* __ASSEMBLER__ */ + +/* Architecture specifics. These can be used by assembly files as well. */ +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portCRITICAL_NESTING_IN_TCB 1 +#define portMAX_CORE_COUNT 8 +#ifndef configNUMBER_OF_CORES +#define configNUMBER_OF_CORES 1 +#endif + +/* This may be set to zero in the config file if the rtos_time +functions are not needed or if it is incremented elsewhere. */ +#ifndef configUPDATE_RTOS_TIME_FROM_TICK_ISR +#define configUPDATE_RTOS_TIME_FROM_TICK_ISR 1 +#endif + +/* + * When entering an ISR we need to grow the stack by one more word than + * we actually need to save the thread context. This is because there are + * some functions, written in assembly *cough* memcpy() *cough*, that think + * it is OK to store words at SP[0]. Therefore the ISR must leave SP[0] alone + * even though it is normally not necessary to do so. + */ +#define portTHREAD_CONTEXT_STACK_GROWTH RTOS_SUPPORT_INTERRUPT_STACK_GROWTH + +#ifndef __ASSEMBLER__ + +/* Check validity of number of cores specified in config */ +#if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES ) +#error "Invalid number of cores specified in config!" 
+#endif + +#define portMEMORY_BARRIER() RTOS_MEMORY_BARRIER() +#define portTASK_STACK_DEPTH(pxTaskCode) RTOS_THREAD_STACK_SIZE(pxTaskCode) +/*-----------------------------------------------------------*/ + +/* Scheduler utilities. */ +#define portYIELD() asm volatile( "KCALLI_lu6 0" ::: "memory" ) + +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ +do \ +{ \ + if( xSwitchRequired != pdFALSE ) \ + { \ + extern uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ]; \ + ulPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \ + } \ +} while( 0 ) + +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/* SMP utilities. */ +#define portGET_CORE_ID() rtos_core_id_get() + +void vPortYieldOtherCore( int xOtherCoreID ); +#define portYIELD_CORE( x ) vPortYieldOtherCore( x ) +/*-----------------------------------------------------------*/ + +/* Architecture specific optimisations. */ +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION +#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0 +#endif + +#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 + + /* Store/clear the ready priorities in a bit map. */ + #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) + #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) + + /*-----------------------------------------------------------*/ + + #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( uxReadyPriorities ) ) + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ +/*-----------------------------------------------------------*/ + +/* Critical section management. */ + +#define portGET_INTERRUPT_STATE() rtos_interrupt_mask_get() + +/* + * This differs from the standard portDISABLE_INTERRUPTS() + * in that it also returns what the interrupt state was + * before it disabling interrupts. + */ +#define portDISABLE_INTERRUPTS() rtos_interrupt_mask_all() + +#define portENABLE_INTERRUPTS() rtos_interrupt_unmask_all() + +/* + * Port set interrupt mask and clear interrupt mask. + */ +#define portSET_INTERRUPT_MASK() rtos_interrupt_mask_all() +#define portCLEAR_INTERRUPT_MASK( ulState ) rtos_interrupt_mask_set( ulState ) + +#define portSET_INTERRUPT_MASK_FROM_ISR() ( 0 ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) ( (void) x ) + +/* + * Will enable interrupts if ulState is non-zero. + */ +#define portRESTORE_INTERRUPTS(ulState) rtos_interrupt_mask_set(ulState) + +/* + * Returns non-zero if currently running in an + * ISR or otherwise in kernel mode. 
+ */ +#define portCHECK_IF_IN_ISR() rtos_isr_running() + +#define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 ) + +#define portGET_ISR_LOCK() rtos_lock_acquire(0) +#define portRELEASE_ISR_LOCK() rtos_lock_release(0) +#define portGET_TASK_LOCK() rtos_lock_acquire(1) +#define portRELEASE_TASK_LOCK() rtos_lock_release(1) + +void vTaskEnterCritical(void); +void vTaskExitCritical(void); +#define portENTER_CRITICAL() vTaskEnterCritical() +#define portEXIT_CRITICAL() vTaskExitCritical() + +extern UBaseType_t vTaskEnterCriticalFromISR( void ); +extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); +#define portENTER_CRITICAL_FROM_ISR vTaskEnterCriticalFromISR +#define portEXIT_CRITICAL_FROM_ISR vTaskExitCriticalFromISR + +/*-----------------------------------------------------------*/ + +/* Runtime stats support */ +#if ( configGENERATE_RUN_TIME_STATS == 1 ) +int xscope_gettime( void ); +#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() /* nothing needed here */ +#define portGET_RUN_TIME_COUNTER_VALUE() xscope_gettime() +#endif +/*-----------------------------------------------------------*/ + +/* Maps sprintf and snprintf to the lite version in lib_rtos_support */ +#if ( configUSE_DEBUG_SPRINTF == 1 ) +#define sprintf(...) rtos_sprintf(__VA_ARGS__) +#define snprintf(...) rtos_snprintf(__VA_ARGS__) +#endif + +/* Attribute for the pxCallbackFunction member of the Timer_t struct. +Required by xcc to calculate stack usage. */ +#define portTIMER_CALLBACK_ATTRIBUTE __attribute__((fptrgroup("timerCallbackGroup"))) + +/* Timer callback function macros. For xcc this ensures they get added to the timer callback +group so that stack usage for certain functions in timers.c can be calculated. */ +#define portTIMER_CALLBACK_FUNCTION_PROTO( vFunction, xTimer ) void vFunction( TimerHandle_t xTimer ) +#define portTIMER_CALLBACK_FUNCTION( vFunction, xTimer ) portTIMER_CALLBACK_ATTRIBUTE void vFunction( TimerHandle_t xTimer ) + +/*-----------------------------------------------------------*/ + +/* Task function macros as described on the FreeRTOS.org WEB site. These are +not necessary for to use this port. They are defined so the common demo files +(which build with all the ports) will build. */ +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/*-----------------------------------------------------------*/ + + +#ifdef __cplusplus +} +#endif + +#endif /* __ASSEMBLER__ */ + +#endif /* PORTMACRO_H */ + diff --git a/portable/ThirdParty/xClang/XCOREAI/rtos_support_rtos_config.h b/portable/ThirdParty/xClang/XCOREAI/rtos_support_rtos_config.h new file mode 100644 index 00000000000..8b8d1054dd6 --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/rtos_support_rtos_config.h @@ -0,0 +1,95 @@ +// Copyright (c) 2020, XMOS Ltd, All rights reserved + +#ifndef RTOS_SUPPORT_RTOS_CONFIG_H_ +#define RTOS_SUPPORT_RTOS_CONFIG_H_ + +/** + * Lets the application know that the RTOS in use is FreeRTOS. + */ +#define RTOS_FREERTOS 1 + +/** + * The number of words to extend the stack by when entering an ISR. + * + * When entering an ISR we need to grow the stack by one more word than + * we actually need to save the thread context. This is because there are + * some functions, written in assembly *cough* memcpy() *cough*, that think + * it is OK to store words at SP[0]. 
Therefore the ISR must leave SP[0] alone + * even though it is normally not necessary to do so. + */ +#define RTOS_SUPPORT_INTERRUPT_STACK_GROWTH ( 44 + 1 ) + +/** + * The word offset into the stack where R1 is to be stored after it + * is extended when saving a thread's context. + */ +#define RTOS_SUPPORT_INTERRUPT_R1_STACK_OFFSET 9 + +/** + * The word offset into the stack where R11 is to be stored after it + * is extended when saving a thread's context. + */ +#define RTOS_SUPPORT_INTERRUPT_R11_STACK_OFFSET 19 + +/** + * The RTOS provided handler that should run when a + * core receives an intercore interrupt request. + */ +#define RTOS_INTERCORE_INTERRUPT_ISR() do { \ + void vIntercoreInterruptISR( void ); \ + vIntercoreInterruptISR(); \ +} while ( 0 ) + +/** + * The number of hardware locks that the RTOS + * requires. For a single core RTOS this could be + * zero. Locks are recursive. + * + * Note that the IRQ routines require a lock and + * will share the first one with the RTOS. + */ +#define RTOS_LOCK_COUNT 2 + +/** + * Remaps all calls to debug_printf() to rtos_printf(). + * When this is on, files should not include both rtos_support.h + * and debug_print.h. + */ +#define RTOS_DEBUG_PRINTF_REMAP 1 + + +#ifdef configENABLE_DEBUG_PRINTF + #if configENABLE_DEBUG_PRINTF + + /* ensure that debug_printf is enabled */ + #ifdef DEBUG_PRINT_ENABLE + #undef DEBUG_PRINT_ENABLE + #endif + #define DEBUG_PRINT_ENABLE 1 + + #ifndef configTASKS_DEBUG + #define configTASKS_DEBUG 0 + #endif + #if configTASKS_DEBUG == 1 + #define DEBUG_PRINT_ENABLE_FREERTOS_TASKS 1 + #else + #define DEBUG_PRINT_DISABLE_FREERTOS_TASKS 1 + #endif + + #else /* configENABLE_DEBUG_PRINTF */ + + /* ensure that debug_printf is disabled */ + #ifdef DEBUG_UNIT + #undef DEBUG_UNIT + #endif + #ifdef DEBUG_PRINT_ENABLE + #undef DEBUG_PRINT_ENABLE + #endif + + #define DEBUG_PRINT_ENABLE 0 + + #endif /* configENABLE_DEBUG_PRINTF */ +#endif + +#endif /* RTOS_SUPPORT_RTOS_CONFIG_H_ */ + From 3c31ea9d86e6d22e9575d6cc677013c27903f769 Mon Sep 17 00:00:00 2001 From: Darian <32921628+Dazza0@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:19:57 +0800 Subject: [PATCH 148/164] Fix configDEINIT_TLS_BLOCK (#73) configDEINIT_TLS_BLOCK() should deinit the TLS block of the task to being deleted instead of the currently running task. --- tasks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index bf226b15cda..5e862d8b131 100644 --- a/tasks.c +++ b/tasks.c @@ -5695,7 +5695,7 @@ static void prvCheckTasksWaitingTermination( void ) #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { /* Free up the memory allocated for the task's TLS Block. */ - configDEINIT_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock ); } #endif From 3578892bb22ac7f460c7db5996e73bb1ab8780f0 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 18 Apr 2023 20:45:39 +0800 Subject: [PATCH 149/164] Sync with main branch (#71) * Fix array-bounds compiler warning on gcc11+ in list.h (#580) listGET_OWNER_OF_NEXT_ENTRY computes `( pxConstList )->pxIndex->pxNext` after verifying that `( pxConstList )->pxIndex` points to `xListEnd`, which due to being a MiniListItem_t, can be shorter than a ListItem_t. Thus, `( pxConstList )->pxIndex` is a `ListItem_t *` that extends past the end of the `List_t` whose `xListEnd` it points to. This is fixed by accessing `pxNext` through a `MiniListItem_t` instead. 
* move the prototype for vApplicationIdleHook to task.h. (#600) Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update equal priority task preemption (#603) * vTaskResume and vTaskPrioritySet don't preempt equal priority task * Update vTaskResumeAll not to preempt task with equal priority * Fix in xTaskResumeFromISR * Update FreeRTOS/FreeRTOS build checks (#613) This is needed to be compatible with the refactoring done in this PR - https://github.com/FreeRTOS/FreeRTOS/pull/889 Signed-off-by: Gaurav Aggarwal Signed-off-by: Gaurav Aggarwal * Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent (#611) Allow ulTaskGetIdleRunTimeCounter and ulTaskGetIdleRunTimePercent to be used whenever configGENERATE_RUN_TIME_STATS is enabled, as this is the only requirement for these functions to work. * Fix some CMake documentation typos (#616) The quick start instructions for CMake mention the "master" git branch which has been replaced by "main" in the current repo. The main CMakeLists.txt documents how to integrate a custom port. Fix a typo in the suggested CMake code. * Added support of 64bit events. (#597) * Added support of 64bit even Signed-off-by: Cervenka Dusan * Added missing brackets Signed-off-by: Cervenka Dusan * Made proper name for tick macro. Signed-off-by: Cervenka Dusan * Improved macro evaluation Signed-off-by: Cervenka Dusan * Fixed missed port files + documentation Signed-off-by: Cervenka Dusan * Changes made on PR Signed-off-by: Cervenka Dusan * Fix macro definition. Signed-off-by: Cervenka Dusan * Formatted code with uncrustify Signed-off-by: Cervenka Dusan --------- Signed-off-by: Cervenka Dusan * Introduce portMEMORY_BARRIER for Microblaze port. (#621) The introduction of `portMEMORY_BARRIER` will ensure the places in the kernel use a barrier will work. For example, `xTaskResumeAll` has a memory barrier to ensure its correctness when compiled with optimization enabled. Without the barrier `xTaskResumeAll` can fail (e.g. start reading and writing to address 0 and/or infinite looping) when `xPendingReadyList` contains more than one task to restore. In `xTaskResumeAll` the compiler chooses to cache the `pxTCB` the first time through the loop for use in every subsequent loop. This is incorrect as the removal of `pxTCB->xEventListItem` will actually change the value of `pxTCB` if it was read again at the top of the loop. The barrier forces the compiler to read `pxTCB` again at the top of the loop. The compiler is operating correctly. The removal `pxTCB->xEventListItem` executes on a `List_t *` and `ListItem_t *`. This means that the compiler can assume that any `MiniListItem_t` values are unchanged by the loop (i.e. "strict-aliasing"). This allows the compiler to cache `pxTCB` as it is obtained via a `MiniListItem_t`. This is incorrect in this case because it is possible for a `ListItem_t *` to actually alias a `MiniListItem_t`. This is technically a "violation of aliasing rules" so we use the the barrier to disable the strict-aliasing optimization in this loop. 
* Do not call exit() on MSVC Port when calling vPortEndScheduler (#624) * make port exitable * correctly set xPortRunning to False * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update PR template to include checkbox for Unit Test related changes (#627) * Fix build failure introduced in PR #597 (#629) The PR #597 introduced a new config option configTICK_TYPE_WIDTH_IN_BITS which can be defined to one of the following: * TICK_TYPE_WIDTH_16_BITS - Tick type is 16 bit wide. * TICK_TYPE_WIDTH_32_BITS - Tick type is 32 bit wide. * TICK_TYPE_WIDTH_64_BITS - Tick type is 64 bit wide. Earlier we supported 16 and 32 bit width for tick type which was controlled using the config option configUSE_16_BIT_TICKS. The PR tried to maintain backward compatibility by honoring configUSE_16_BIT_TICKS. The backward compatibility did not work as expected though, as the macro configTICK_TYPE_WIDTH_IN_BITS was used before it was defined. This PR addresses it by ensuring that the macro configTICK_TYPE_WIDTH_IN_BITS is defined before it is used. Testing 1. configUSE_16_BIT_TICKS is defined to 0. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 2. configUSE_16_BIT_TICKS is defined to 1. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 3. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_16_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 4. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_32_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 5. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_64_BITS. ``` #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. ``` The testing was done for GCC/ARM_CM3 port which does not support 64 bit tick type. 6. Neither configUSE_16_BIT_TICKS nor configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. 
``` 7. Both configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` Related issue - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/628 Signed-off-by: Gaurav Aggarwal * Feature/fixing clang gnu compiler warnings (#620) * Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558) * Using single name definition for libraries everywhere. (#558) * Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571) * Removing compiler warnings for GNU and Clang. (#571) * Added in documentation on how to consume from a main project. Added default PORT selection for native POSIX and MINGW platforms. * Only adding freertos_config if it exists. Removing auto generation of it from a FREERTOS_CONFIG_FILE_DIRECTORY. * Fixing clang and gnu compiler warnings. * Adding in project information and how to compile for GNU/clang * Fixing compiler issue with unused variable - no need to declare variable. * Adding in compile warnings for linux builds that kernel is okay with using. * Fixing more extra-semi-stmt clang warnings. * Moving definition of hooks into header files if features are enabled. * Fixing formatting with uncrustify. * Fixing merge conflicts with main merge. * Fixing compiler errors due to merge issues and formatting. * Fixing Line feeds. * Adding 'portNORETURN' into portmacros.h. Other Updates based on PR request * Further clean-up of clang and clang-tidy issues. * Removing compiler specific pragmas from common c files. * Fixing missing lexicon entry and uncrustify formatting changes. * Resolving merge issue multiple defnitions of proto for prvIdleTask * Fixing formatting issues that are not covered by uncrustify. Use clang-tidy instead if you want this level of control. * More uncrustify formatting issues. * Fixing extra bracket in #if statement. --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * POSIX port fixes (#626) * Fix types in POSIX port Use TaskFunction_t and StackType_t as other ports do. * Fix portTICK_RATE_MICROSECONDS in POSIX port --------- Co-authored-by: Jacques GUILLOU Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Cortex-M35P: Add Cortex-M35P port (#631) * Cortex-M35P: Add Cortex-M35P port The Cortex-M35P support added to kernel. The port hasn't been validated yet with TF-M. Hence TF-M support is not included in this port. Signed-off-by: Devaraj Ranganna * Add portNORETURN to the newly added portmacro.h Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Introduced Github Status Badge for Unit Tests (#634) * Introduced Github Status Badge for Unit Tests * Github status badge to point to latest run * Github status badge UT points to latest results * Fixed URL for Github Status badge --------- Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Remove C99 requirement from CMake file (#633) * Remove C99 requirement from CMake file The kernel source is C89 compliant and does not need C99. 
Signed-off-by: Gaurav Aggarwal * Explicitly set C89 requirement for kernel Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add Thread Local Storage (TLS) support using Picolibc functions (#343) * Pass top of stack to configINIT_TLS_BLOCK Picolibc wants to allocate the per-task TLS block within the stack segment, so it will need to modify the top of stack value. Pass the pxTopOfStack variable to make this explicit. Signed-off-by: Keith Packard * Move newlib-specific definitions to separate file This reduces the clutter in FreeRTOS.h caused by having newlib-specific macros present there. Signed-off-by: Keith Packard * Make TLS code depend only on configUSE_C_RUNTIME_TLS_SUPPORT Remove reference to configUSE_NEWLIB_REENTRANT as that only works when using newlib. configUSE_C_RUNTIME_TLS_SUPPORT is always set when configUSE_NEWLIB_REENTRANT is set, so using both was redundant in that case. Signed-off-by: Keith Packard * portable-ARC: Adapt ARC support to use generalized TLS support With generalized thread local storage (TLS) support present in the core, the two ARC ports need to have the changes to the TCB mirrored to them. Signed-off-by: Keith Packard * Add Thread Local Storage (TLS) support using Picolibc functions This patch provides definitions of the general TLS support macros in terms of the Picolibc TLS support functions. Picolibc is normally configured to use TLS internally for all variables that are intended to be task-local, so these changes are necessary for picolibc to work correctly with FreeRTOS. The picolibc helper functions rely on elements within the linker script to arrange the TLS data in memory and define some symbols. Applications wanting to use this mechanism will need changes in their linker script when migrating to picolibc. Signed-off-by: Keith Packard --------- Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Interrupt priority assert improvements for CM3/4/7 (#602) * Interrupt priority assert improvements for CM3/4/7 In the ARM_CM3, ARM_CM4, and ARM_CM7 ports, change the assertion that `configMAX_SYSCALL_INTERRUPT_PRIORITY` is nonzero to account for the number of priority bits implemented by the hardware. Change these ports to also use the lowest priority for PendSV and SysTick, ignoring `configKERNEL_INTERRUPT_PRIORITY`. * Remove not needed configKERNEL_INTERRUPT_PRIORITY define Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Introduced code coverage status badge (#635) * Introduced code coverage status badge * Trying to fix the URL checker issue * Fix URL check Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * added portPOINTER_SIZE_TYPE and SIZE_MAX definition to PIC24/dsPIC port (#636) * added portPOINTER_SIZE_TYPE definition to PIC24/dsPIC port * Added SIZE_MAX definition to PIC24/dsPIC33 * Fix TLS and stack alignment when using picolibc (#637) Both the TLS block and stack must be correctly aligned when using picolibc. The architecture stack alignment is represented by the portBYTE_ALIGNMENT_MASK and the TLS block alignment is provided by the Picolibc _tls_align() inline function for Picolibc version 1.8 and above. 
For older versions of Picolibc, we'll assume that the TLS block requires the same alignment as the stack. For downward growing stacks, this requires aligning the start of the TLS block to the maximum of the stack alignment and the TLS alignment. With this, both the TLS block and stack will now be correctly aligned. For upward growing stacks, the two areas must be aligned independently; the TLS block is aligned from the start of the stack, then the tls space is allocated, and then the stack is aligned above that. It's probably useful to know here that the linker ensures that variables within the TLS block are assigned offsets that match their alignment requirements. If the TLS block itself is correctly aligned, then everything within will also be. I have only tested the downward growing stack branch of this patch. Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Enable building the GCC Cortex-R5 port without an FPU (#586) * Ensure configUSE_TASK_FPU_SUPPORT option is set correctly If one does enable the FPU of the Cortex-R5 processor, then the GCC compiler will define the macro __ARM_FP. This can be used to ensure, that the configUSE_TASK_FPU_SUPPORT is set accordingly. * Enable the implementation of vPortTaskUsesFPU only if configUSE_TASK_FPU_SUPPORT is set to 1 * Remove error case in pxPortInitialiseStack The case of configUSE_TASK_FPU_SUPPORT is 0 is now handled * Enable access to FPU registers only if FPU is enabled * Make minor formating changes * Format ARM Cortex-R5 port * Address review comments from @ChristosZosi * Minor code review suggestions Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Christos Zosimidis Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Fix freertos_kernel cmake property, Posix Port (#640) * Fix freertos_kernel cmake property, Posix Port * Moves the `set_property()` call below the target definition in top level CMakeLists file * Corrects billion value from `ULL` suffix (not C90 compliant) to `UL` suffix with cast to uint64_t * Add blank line to CMakeLists.txt * Add missing FreeRTOS+ defines * Run kernel demos and unit tests for PR changes (#645) * Run kernel demos and unit tests for PR changes Kernel demos check builds multiple demos from FreeRTOS/FreeRTOS and unit tests check runs unit tests in FreeRTOS/FreeRTOS. Both of these checks currently use main branch of FreeRTOS-Kernel. This commits updates these checks to use the changes in the PR. Signed-off-by: Gaurav Aggarwal * Do not specify PR SHA explicitly as that is default Signed-off-by: Gaurav Aggarwal * Remove explicit PR SHA from kernel checks Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add functions to get the buffers of statically created objects (#641) Added various ...GetStaticBuffer() functions to get the buffers of statically created objects. 
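For illustration only (not part of this patch), a minimal sketch of how an application
might call the task variant of the new API. It assumes configSUPPORT_STATIC_ALLOCATION
is set to 1 and a hypothetical handle named xStaticTaskHandle for a task that was created
with xTaskCreateStatic(); the authoritative prototypes are the ...GetStaticBuffer()
declarations added to the header files in this change.

```c
#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical handle of a task created earlier with xTaskCreateStatic(). */
extern TaskHandle_t xStaticTaskHandle;

void vQueryStaticTaskBuffers( void )
{
    StackType_t * puxStackBuffer = NULL;
    StaticTask_t * pxTaskBuffer = NULL;

    /* xTaskGetStaticBuffers() returns pdTRUE and fills in both pointers when
     * the task's stack and TCB were supplied by the application, i.e. the
     * task was created statically. */
    if( xTaskGetStaticBuffers( xStaticTaskHandle, &puxStackBuffer, &pxTaskBuffer ) == pdTRUE )
    {
        /* puxStackBuffer and pxTaskBuffer now point at the StackType_t array
         * and StaticTask_t structure originally passed to xTaskCreateStatic(). */
    }
}
```

The getters for the other statically creatable objects (queues, semaphores, timers,
stream/message buffers and event groups) follow the same out-parameter pattern.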
--------- Co-authored-by: Paul Bartell Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Cortex-M Assert when NVIC implements 8 PRIO bits (#639) * Cortex-M Assert when NVIC implements 8 PRIO bits * Fix CM3 ports * Fix ARM_CM3_MPU * Fix ARM CM3 * Fix ARM_CM4_MPU * Fix ARM_CM4 * Fix GCC ARM_CM7 * Fix IAR ARM ports * Uncrustify changes * Fix MikroC_ARM_CM4F port * Fix MikroC_ARM_CM4F port-(2) * Fix RVDS ARM ports * Revert changes for Tasking/ARM_CM4F port * Revert changes for Tasking/ARM_CM4F port-(2) * Update port.c Fix GCC/ARM_CM4F port * Update port.c * update GCC\ARM_CM4F port * update port.c * Assert to check configMAX_SYSCALL_INTERRUPT_PRIORITY is set to higher priority * Fix merge error: remove duplicate code * Fix typos --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Ubuntu * Remove C90 requirement from CMakeLists (#649) This is needed as it is breaking projects - https://forums.freertos.org/t/freertos-gcc-cmake/16984 We will re-evaluate and accordingly add this later. Signed-off-by: Gaurav Aggarwal * Only add alignment padding when needed (#650) Heap 4 and Heap 5 add some padding to ensure that the allocated blocks are always aligned to portBYTE_ALIGNMENT bytes. The code until now was adding padding always even if the resulting block was already aligned. This commits updates the code to only add padding if the resulting block is not aligned. Signed-off-by: Gaurav Aggarwal * add a missing comma (#651) * fix conversion warning (#658) FreeRTOS-Kernel/portable/GCC/ARM_CM4F/port.c:399:41: error: conversion from 'uint32_t' {aka 'long unsigned int'} to 'uint8_t' {aka 'unsigned char'} may change value [-Werror=conversion] Signed-off-by: Vo Trung Chi --------- Signed-off-by: Gaurav Aggarwal Signed-off-by: Cervenka Dusan Signed-off-by: Devaraj Ranganna Signed-off-by: Keith Packard Signed-off-by: Vo Trung Chi Co-authored-by: Archit Gupta <71798289+archigup@users.noreply.github.com> Co-authored-by: tcpluess Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Chris Copeland Co-authored-by: David J. 
Fiddes <35607151+davefiddes@users.noreply.github.com> Co-authored-by: Dusan Cervenka Co-authored-by: bbain <16752579+bbain@users.noreply.github.com> Co-authored-by: Ju1He1 <93189163+Ju1He1@users.noreply.github.com> Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> Co-authored-by: phelter Co-authored-by: jacky309 Co-authored-by: Jacques GUILLOU Co-authored-by: Devaraj Ranganna Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Joseph Julicher Co-authored-by: Paul Bartell Co-authored-by: Christos Zosimidis Co-authored-by: Kody Stribrny <89810515+kstribrnAmzn@users.noreply.github.com> Co-authored-by: Holden Co-authored-by: Darian <32921628+Dazza0@users.noreply.github.com> Co-authored-by: Ubuntu Co-authored-by: Nicolas Co-authored-by: Vo Trung Chi --- .github/actions/url_verifier.sh | 11 +- .github/lexicon.txt | 23 + .github/pull_request_template.md | 7 + .github/workflows/kernel-checks.yml | 26 - .github/workflows/kernel-demos.yml | 181 +++ .github/workflows/unit-tests.yml | 4 +- CMakeLists.txt | 46 +- README.md | 4 +- event_groups.c | 57 +- include/FreeRTOS.h | 72 +- include/event_groups.h | 48 +- include/list.h | 38 +- include/message_buffer.h | 31 + include/newlib-freertos.h | 62 + include/picolibc-freertos.h | 90 ++ include/projdefs.h | 13 +- include/queue.h | 47 +- include/semphr.h | 21 + include/stack_macros.h | 4 +- include/stream_buffer.h | 32 + include/task.h | 103 +- include/timers.h | 34 + portable/ARMv8M/copy_files.py | 30 +- .../portable/GCC/ARM_CM23/portmacro.h | 1 + .../portable/GCC/ARM_CM23_NTZ/portmacro.h | 1 + .../portable/GCC/ARM_CM33/portmacro.h | 1 + .../portable/GCC/ARM_CM33_NTZ/portmacro.h | 1 + .../portable/GCC/ARM_CM35P/portmacro.h | 67 + .../portable/GCC/ARM_CM55/portmacro.h | 1 + .../portable/GCC/ARM_CM85/portmacro.h | 1 + .../portable/IAR/ARM_CM35P/portmacro.h | 78 + portable/ARMv8M/non_secure/portmacrocommon.h | 6 +- portable/BCC/16BitDOS/Flsh186/prtmacro.h | 6 +- portable/BCC/16BitDOS/PC/prtmacro.h | 6 +- portable/CCS/ARM_CM3/port.c | 82 +- portable/CCS/ARM_CM3/portmacro.h | 6 +- portable/CCS/ARM_CM4F/port.c | 82 +- portable/CCS/ARM_CM4F/portmacro.h | 6 +- portable/CCS/ARM_Cortex-R4/portmacro.h | 6 +- portable/CCS/MSP430X/portmacro.h | 8 +- portable/CMakeLists.txt | 36 + portable/CodeWarrior/ColdFire_V1/portmacro.h | 8 +- portable/CodeWarrior/ColdFire_V2/portmacro.h | 8 +- portable/CodeWarrior/HCS12/portmacro.h | 8 +- portable/GCC/ARM7_AT91FR40008/portmacro.h | 8 +- portable/GCC/ARM7_AT91SAM7S/portmacro.h | 8 +- portable/GCC/ARM7_LPC2000/portmacro.h | 8 +- portable/GCC/ARM7_LPC23xx/portmacro.h | 8 +- portable/GCC/ARM_CA53_64_BIT/port.c | 6 +- portable/GCC/ARM_CA9/port.c | 6 +- portable/GCC/ARM_CM0/portmacro.h | 7 +- portable/GCC/ARM_CM23/non_secure/portasm.c | 2 +- portable/GCC/ARM_CM23/non_secure/portmacro.h | 1 + .../GCC/ARM_CM23/non_secure/portmacrocommon.h | 6 +- .../GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 1 + .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 6 +- portable/GCC/ARM_CM3/port.c | 67 +- portable/GCC/ARM_CM3/portmacro.h | 7 +- portable/GCC/ARM_CM33/non_secure/portmacro.h | 1 + .../GCC/ARM_CM33/non_secure/portmacrocommon.h | 6 +- .../GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 1 + .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 6 +- portable/GCC/ARM_CM35P/non_secure/port.c | 1261 +++++++++++++++++ 
portable/GCC/ARM_CM35P/non_secure/portasm.c | 470 ++++++ portable/GCC/ARM_CM35P/non_secure/portasm.h | 114 ++ portable/GCC/ARM_CM35P/non_secure/portmacro.h | 67 + .../ARM_CM35P/non_secure/portmacrocommon.h | 313 ++++ .../GCC/ARM_CM35P/secure/secure_context.c | 351 +++++ .../GCC/ARM_CM35P/secure/secure_context.h | 135 ++ .../ARM_CM35P/secure/secure_context_port.c | 97 ++ portable/GCC/ARM_CM35P/secure/secure_heap.c | 454 ++++++ portable/GCC/ARM_CM35P/secure/secure_heap.h | 66 + portable/GCC/ARM_CM35P/secure/secure_init.c | 106 ++ portable/GCC/ARM_CM35P/secure/secure_init.h | 54 + .../GCC/ARM_CM35P/secure/secure_port_macros.h | 140 ++ portable/GCC/ARM_CM35P_NTZ/non_secure/port.c | 1261 +++++++++++++++++ .../GCC/ARM_CM35P_NTZ/non_secure/portasm.c | 365 +++++ .../GCC/ARM_CM35P_NTZ/non_secure/portasm.h | 114 ++ .../GCC/ARM_CM35P_NTZ/non_secure/portmacro.h | 67 + .../non_secure/portmacrocommon.h | 313 ++++ portable/GCC/ARM_CM3_MPU/port.c | 60 +- portable/GCC/ARM_CM3_MPU/portmacro.h | 7 +- portable/GCC/ARM_CM4F/port.c | 96 +- portable/GCC/ARM_CM4F/portmacro.h | 10 +- portable/GCC/ARM_CM4_MPU/port.c | 60 +- portable/GCC/ARM_CM4_MPU/portmacro.h | 7 +- portable/GCC/ARM_CM55/non_secure/portmacro.h | 1 + .../GCC/ARM_CM55/non_secure/portmacrocommon.h | 6 +- .../GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 1 + .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 6 +- portable/GCC/ARM_CM7/r0p1/port.c | 100 +- portable/GCC/ARM_CM7/r0p1/portmacro.h | 7 +- portable/GCC/ARM_CM85/non_secure/portmacro.h | 1 + .../GCC/ARM_CM85/non_secure/portmacrocommon.h | 6 +- .../GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 1 + .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 6 +- portable/GCC/ARM_CR5/port.c | 311 ++-- portable/GCC/ARM_CR5/portASM.S | 86 +- portable/GCC/ARM_CR5/portmacro.h | 250 ++-- portable/GCC/ATMega323/portmacro.h | 8 +- portable/GCC/AVR32_UC3/portmacro.h | 8 +- portable/GCC/CORTUS_APS3/portmacro.h | 8 +- portable/GCC/ColdFire_V2/portmacro.h | 8 +- portable/GCC/H8S2329/portmacro.h | 8 +- portable/GCC/HCS12/portmacro.h | 8 +- portable/GCC/MSP430F449/portmacro.h | 8 +- portable/GCC/MicroBlaze/portmacro.h | 7 +- portable/GCC/MicroBlazeV8/portmacro.h | 7 +- portable/GCC/MicroBlazeV9/portmacro.h | 7 +- portable/GCC/NiosII/portmacro.h | 6 +- portable/GCC/PPC405_Xilinx/portmacro.h | 8 +- portable/GCC/PPC440_Xilinx/portmacro.h | 8 +- portable/GCC/RL78/portmacro.h | 8 +- portable/GCC/RX100/portmacro.h | 6 +- portable/GCC/RX200/portmacro.h | 6 +- portable/GCC/RX600/portmacro.h | 6 +- portable/GCC/RX600v2/portmacro.h | 6 +- portable/GCC/RX700v3_DPFPU/portmacro.h | 6 +- portable/GCC/STR75x/portmacro.h | 8 +- portable/GCC/TriCore_1782/portmacro.h | 6 +- portable/IAR/78K0R/portmacro.h | 8 +- portable/IAR/ARM_CM0/port.c | 7 - portable/IAR/ARM_CM0/portmacro.h | 6 +- .../IAR/ARM_CM23/non_secure/portmacrocommon.h | 6 +- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 6 +- portable/IAR/ARM_CM3/port.c | 65 +- portable/IAR/ARM_CM3/portmacro.h | 6 +- .../IAR/ARM_CM33/non_secure/portmacrocommon.h | 6 +- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 6 +- portable/IAR/ARM_CM35P/non_secure/port.c | 1261 +++++++++++++++++ portable/IAR/ARM_CM35P/non_secure/portasm.h | 114 ++ portable/IAR/ARM_CM35P/non_secure/portasm.s | 353 +++++ portable/IAR/ARM_CM35P/non_secure/portmacro.h | 78 + .../ARM_CM35P/non_secure/portmacrocommon.h | 313 ++++ .../IAR/ARM_CM35P/secure/secure_context.c | 351 +++++ .../IAR/ARM_CM35P/secure/secure_context.h | 135 ++ .../secure/secure_context_port_asm.s | 86 ++ portable/IAR/ARM_CM35P/secure/secure_heap.c | 454 ++++++ 
portable/IAR/ARM_CM35P/secure/secure_heap.h | 66 + portable/IAR/ARM_CM35P/secure/secure_init.c | 106 ++ portable/IAR/ARM_CM35P/secure/secure_init.h | 54 + .../IAR/ARM_CM35P/secure/secure_port_macros.h | 140 ++ portable/IAR/ARM_CM35P_NTZ/non_secure/port.c | 1261 +++++++++++++++++ .../IAR/ARM_CM35P_NTZ/non_secure/portasm.h | 114 ++ .../IAR/ARM_CM35P_NTZ/non_secure/portasm.s | 262 ++++ .../IAR/ARM_CM35P_NTZ/non_secure/portmacro.h | 78 + .../non_secure/portmacrocommon.h | 313 ++++ portable/IAR/ARM_CM4F/port.c | 58 +- portable/IAR/ARM_CM4F/portmacro.h | 6 +- portable/IAR/ARM_CM4F_MPU/port.c | 296 ++-- portable/IAR/ARM_CM4F_MPU/portmacro.h | 6 +- .../IAR/ARM_CM55/non_secure/portmacrocommon.h | 6 +- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 6 +- portable/IAR/ARM_CM7/r0p1/port.c | 58 +- portable/IAR/ARM_CM7/r0p1/portmacro.h | 6 +- .../IAR/ARM_CM85/non_secure/portmacrocommon.h | 6 +- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 6 +- portable/IAR/ATMega323/portmacro.h | 8 +- portable/IAR/AVR32_UC3/portmacro.h | 8 +- portable/IAR/AVR_AVRDx/portmacro.h | 8 +- portable/IAR/AVR_Mega0/portmacro.h | 8 +- portable/IAR/AtmelSAM7S64/portmacro.h | 8 +- portable/IAR/AtmelSAM9XE/portmacro.h | 8 +- portable/IAR/LPC2000/portmacro.h | 8 +- portable/IAR/MSP430/portmacro.h | 8 +- portable/IAR/MSP430X/portmacro.h | 8 +- portable/IAR/RL78/portmacro.h | 8 +- portable/IAR/RX100/portmacro.h | 6 +- portable/IAR/RX600/portmacro.h | 6 +- portable/IAR/RX700v3_DPFPU/portmacro.h | 6 +- portable/IAR/RXv2/portmacro.h | 6 +- portable/IAR/STR71x/portmacro.h | 8 +- portable/IAR/STR75x/portmacro.h | 8 +- portable/IAR/STR91x/portmacro.h | 8 +- portable/IAR/V850ES/portmacro.h | 8 +- portable/MPLAB/PIC18F/portmacro.h | 8 +- portable/MPLAB/PIC24_dsPIC/portmacro.h | 10 +- portable/MPLAB/PIC32MEC14xx/portmacro.h | 8 +- portable/MPLAB/PIC32MX/portmacro.h | 6 +- portable/MPLAB/PIC32MZ/portmacro.h | 6 +- portable/MSVC-MingW/port.c | 44 +- portable/MSVC-MingW/portmacro.h | 6 +- portable/MemMang/heap_4.c | 38 +- portable/MemMang/heap_5.c | 30 +- portable/MikroC/ARM_CM4F/port.c | 62 +- portable/MikroC/ARM_CM4F/portmacro.h | 6 +- .../Tern_EE/large_untested/portmacro.h | 8 +- portable/Paradigm/Tern_EE/small/portmacro.h | 8 +- portable/RVDS/ARM7_LPC21xx/portmacro.h | 8 +- portable/RVDS/ARM_CA9/portmacro.h | 6 +- portable/RVDS/ARM_CM0/portmacro.h | 6 +- portable/RVDS/ARM_CM3/port.c | 62 +- portable/RVDS/ARM_CM3/portmacro.h | 6 +- portable/RVDS/ARM_CM4F/port.c | 58 +- portable/RVDS/ARM_CM4F/portmacro.h | 6 +- portable/RVDS/ARM_CM4_MPU/port.c | 242 ++-- portable/RVDS/ARM_CM4_MPU/portmacro.h | 6 +- portable/RVDS/ARM_CM7/r0p1/port.c | 58 +- portable/RVDS/ARM_CM7/r0p1/portmacro.h | 6 +- portable/Renesas/RX100/portmacro.h | 6 +- portable/Renesas/RX200/portmacro.h | 6 +- portable/Renesas/RX600/portmacro.h | 6 +- portable/Renesas/RX600v2/portmacro.h | 6 +- portable/Renesas/RX700v3_DPFPU/portmacro.h | 6 +- portable/Renesas/SH2A_FPU/portmacro.h | 4 +- portable/Rowley/MSP430F449/portmacro.h | 8 +- portable/SDCC/Cygnal/portmacro.h | 8 +- portable/Softune/MB91460/portmacro.h | 8 +- portable/Softune/MB96340/portmacro.h | 8 +- portable/Tasking/ARM_CM4F/port.c | 10 +- portable/Tasking/ARM_CM4F/portmacro.h | 6 +- .../ThirdParty/CDK/T-HEAD_CK802/portmacro.h | 8 +- portable/ThirdParty/GCC/ARC_EM_HS/port.c | 12 +- portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h | 8 +- portable/ThirdParty/GCC/ARC_v1/port.c | 12 +- portable/ThirdParty/GCC/ARC_v1/portmacro.h | 8 +- portable/ThirdParty/GCC/ATmega/portmacro.h | 8 +- portable/ThirdParty/GCC/Posix/port.c | 15 +- 
portable/ThirdParty/GCC/Posix/portmacro.h | 4 +- .../ThirdParty/GCC/RP2040/include/portmacro.h | 11 +- portable/ThirdParty/GCC/RP2040/port.c | 12 +- .../GCC/Xtensa_ESP32/include/portmacro.h | 8 +- portable/ThirdParty/XCC/Xtensa/portmacro.h | 6 +- portable/WizC/PIC18/portmacro.h | 6 +- portable/oWatcom/16BitDOS/Flsh186/portmacro.h | 8 +- portable/oWatcom/16BitDOS/PC/portmacro.h | 8 +- queue.c | 113 +- stream_buffer.c | 84 +- tasks.c | 267 ++-- timers.c | 48 +- 230 files changed, 14716 insertions(+), 1440 deletions(-) create mode 100644 .github/workflows/kernel-demos.yml create mode 100644 include/newlib-freertos.h create mode 100644 include/picolibc-freertos.h create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h mode change 100644 => 100755 portable/CCS/ARM_CM3/port.c mode change 100644 => 100755 portable/CCS/ARM_CM4F/port.c mode change 100644 => 100755 portable/GCC/ARM_CM3/port.c create mode 100644 portable/GCC/ARM_CM35P/non_secure/port.c create mode 100644 portable/GCC/ARM_CM35P/non_secure/portasm.c create mode 100644 portable/GCC/ARM_CM35P/non_secure/portasm.h create mode 100644 portable/GCC/ARM_CM35P/non_secure/portmacro.h create mode 100644 portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM35P/secure/secure_context.c create mode 100644 portable/GCC/ARM_CM35P/secure/secure_context.h create mode 100644 portable/GCC/ARM_CM35P/secure/secure_context_port.c create mode 100644 portable/GCC/ARM_CM35P/secure/secure_heap.c create mode 100644 portable/GCC/ARM_CM35P/secure/secure_heap.h create mode 100644 portable/GCC/ARM_CM35P/secure/secure_init.c create mode 100644 portable/GCC/ARM_CM35P/secure/secure_init.h create mode 100644 portable/GCC/ARM_CM35P/secure/secure_port_macros.h create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/port.c create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.h create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h mode change 100644 => 100755 portable/GCC/ARM_CM3_MPU/port.c mode change 100644 => 100755 portable/GCC/ARM_CM4F/port.c mode change 100644 => 100755 portable/GCC/ARM_CM4_MPU/port.c mode change 100644 => 100755 portable/GCC/ARM_CM7/r0p1/port.c mode change 100644 => 100755 portable/IAR/ARM_CM3/port.c create mode 100644 portable/IAR/ARM_CM35P/non_secure/port.c create mode 100644 portable/IAR/ARM_CM35P/non_secure/portasm.h create mode 100644 portable/IAR/ARM_CM35P/non_secure/portasm.s create mode 100644 portable/IAR/ARM_CM35P/non_secure/portmacro.h create mode 100644 portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM35P/secure/secure_context.c create mode 100644 portable/IAR/ARM_CM35P/secure/secure_context.h create mode 100644 portable/IAR/ARM_CM35P/secure/secure_context_port_asm.s create mode 100644 portable/IAR/ARM_CM35P/secure/secure_heap.c create mode 100644 portable/IAR/ARM_CM35P/secure/secure_heap.h create mode 100644 portable/IAR/ARM_CM35P/secure/secure_init.c create mode 100644 portable/IAR/ARM_CM35P/secure/secure_init.h create mode 100644 portable/IAR/ARM_CM35P/secure/secure_port_macros.h create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/port.c create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.h create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s create mode 
100644 portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h mode change 100644 => 100755 portable/IAR/ARM_CM4F/port.c mode change 100644 => 100755 portable/IAR/ARM_CM4F_MPU/port.c mode change 100644 => 100755 portable/IAR/ARM_CM7/r0p1/port.c mode change 100644 => 100755 portable/MikroC/ARM_CM4F/port.c mode change 100644 => 100755 portable/RVDS/ARM_CM3/port.c mode change 100644 => 100755 portable/RVDS/ARM_CM4F/port.c mode change 100644 => 100755 portable/RVDS/ARM_CM4_MPU/port.c mode change 100644 => 100755 portable/RVDS/ARM_CM7/r0p1/port.c mode change 100644 => 100755 portable/Tasking/ARM_CM4F/port.c diff --git a/.github/actions/url_verifier.sh b/.github/actions/url_verifier.sh index 4c8aed53b12..e9804657b20 100755 --- a/.github/actions/url_verifier.sh +++ b/.github/actions/url_verifier.sh @@ -28,13 +28,14 @@ function test { for UNIQ_URL in ${!dict[@]} # loop urls do - CURL_RES=$(curl -si --user-agent "$(USER_AGENT)" ${UNIQ_URL} 2>/dev/null| head -n 1 | cut -f 2 -d ' ') + CURL_RES=$(curl -si --user-agent "${USER_AGENT}" ${UNIQ_URL} 2>/dev/null| head -n 1 | cut -f 2 -d ' ') RES=$? + echo "=================================" + echo "Checking URL: ${UNIQ_URL}" + if [ "${CURL_RES}" == '' -o "${CURL_RES}" != '200' ] then - echo "URL is: ${UNIQ_URL}" - echo "File names: ${dict[$UNIQ_URL]}" if [ "${CURL_RES}" == '' ] # curl returned an error then CURL_RES=$RES @@ -47,8 +48,10 @@ function test { else echo WARNING: Result is: "${CURL_RES}" fi - echo "=================================" + else + echo SUCCESS: Result is: "${CURL_RES}" fi + echo "=================================" done if [ "${SCRIPT_RET}" -eq 0 ] diff --git a/.github/lexicon.txt b/.github/lexicon.txt index c36907a352a..305498fdc59 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1054,6 +1054,7 @@ mclk mconfigintcoresw mcr mcu +md mddr mder mdh @@ -1336,6 +1337,7 @@ phy phya pic picnt +picolibc pien piir pimr @@ -1463,13 +1465,24 @@ ppdc ppio ppitc ppmc +ppucmessagebufferstoragearea +ppucqueuestorage +ppucstreambufferstoragearea ppudr ppuer ppusr +ppuxstackbuffer ppvdestination ppwm +ppxeventgroupbuffer ppxidletaskstackbuffer ppxidletasktcbbuffer +ppxsemaphorebuffer +ppxstaticmessagebuffer +ppxstaticqueue +ppxstaticstreambuffer +ppxtaskbuffer +ppxtimerbuffer ppxtimertaskstackbuffer ppxtimertasktcbbuffer pr @@ -2325,6 +2338,8 @@ ulstoppedtimercompensation ultablebase ultaskgetidleruntimecounter ultaskgetidleruntimepercent +ultaskgetruntimecounter +ultaskgetruntimepercent ultaskhasfpucontext ultasknotifystateclear ultasknotifytake @@ -2465,6 +2480,7 @@ vaninterruptserviceroutine vanisr vanothertask vapplicationcleartimerinterrupt +vapplicationdaemontaskstartuphook vapplicationexceptionregisterdump vapplicationfpusafeirqhandler vapplicationgetidletaskmemory @@ -2739,6 +2755,7 @@ xeventgroupcreatestatic xeventgroupdelete xeventgroupgetbits xeventgroupgetbitsfromisr +xeventgroupgetstaticbuffer xeventgroupsetbits xeventgroupsetbitsfromisr xeventgroupsync @@ -2817,6 +2834,7 @@ xmessage xmessagebuffer xmessagebuffercreate xmessagebuffercreatestatic +xmessagebuffergetstaticbuffers xmessagebufferisempty xmessagebufferisfull xmessagebuffernextlengthbytes @@ -2888,6 +2906,7 @@ xqueuecreatestatic xqueuegenericsend xqueuegenericsendfromisr xqueuegetmutexholder +xqueuegetstaticbuffers xqueuegivefromisr xqueuegivemutexrecursive xqueueorsemaphore @@ -2942,6 +2961,7 @@ xsemaphorecreaterecursivemutex xsemaphorecreaterecursivemutexstatic xsemaphoregetmutexholder 
xsemaphoregetmutexholderfromisr +xsemaphoregetstaticbuffer xsemaphoregive xsemaphoregivefromisr xsemaphoregivemutexrecursive @@ -2966,6 +2986,7 @@ xstreambuffer xstreambufferbytesavailable xstreambuffercreate xstreambuffercreatestatic +xstreambuffergetstaticbuffers xstreambufferisempty xstreambufferisfull xstreambuffernextmessagelengthbytes @@ -3005,6 +3026,7 @@ xtaskgetcurrenttaskhandle xtaskgethandle xtaskgetidletaskhandle xtaskgetschedulerstate +xtaskgetstaticbuffers xtaskgettickcount xtaskgettickcountfromisr xtaskhandle @@ -3074,6 +3096,7 @@ xtimergenericcommand xtimergetexpirytime xtimergetperiod xtimergetreloadmode +xtimergetstaticbuffer xtimergettimerdaemontaskhandle xtimeristimeractive xtimerlistitem diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c3c8607fbde..a39aa2bfa33 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -8,6 +8,13 @@ Test Steps ----------- +Checklist: +---------- + + +- [ ] I have tested my changes. No regression in existing tests. +- [ ] I have modified and/or added unit-tests to cover the code changes in this Pull Request. + Related Issue ----------- diff --git a/.github/workflows/kernel-checks.yml b/.github/workflows/kernel-checks.yml index 889a53e6c22..a4627f0e07c 100644 --- a/.github/workflows/kernel-checks.yml +++ b/.github/workflows/kernel-checks.yml @@ -28,7 +28,6 @@ jobs: - name: Checkout Pull Request uses: actions/checkout@v2 with: - ref: ${{ github.event.pull_request.head.sha }} path: inspect # Collect all affected files @@ -45,28 +44,3 @@ jobs: cd inspect .github/scripts/kernel_checker.py --json ${HOME}/files_modified.json ${HOME}/files_added.json ${HOME}/files_renamed.json exit $? - build-checker: - name: FreeRTOS Posix Build Check - runs-on: ubuntu-latest - steps: - - name: Checkout the parent repository - uses: actions/checkout@v2 - with: - ref: main - repository: FreeRTOS/FreeRTOS - submodules: 'recursive' - fetch-depth: 1 - path: ./workspace - - name: Checkout the current repository - uses: actions/checkout@v2 - with: - path: ./workspace/FreeRTOS/Source - - name: Posix Build Checker - run: | - bash workspace/.github/scripts/posix_build_checker.sh workspace - - name: Install lib pcap dev - run: | - sudo apt-get install libpcap-dev - - name: Posix Network Build Checker - run: | - bash workspace/.github/scripts/posix_network_build_checker.sh workspace diff --git a/.github/workflows/kernel-demos.yml b/.github/workflows/kernel-demos.yml new file mode 100644 index 00000000000..96a4c59759e --- /dev/null +++ b/.github/workflows/kernel-demos.yml @@ -0,0 +1,181 @@ +name: FreeRTOS-Kernel Demos +on: [push, pull_request] + +jobs: + WIN32-MSVC: + name: WIN32 MSVC + runs-on: windows-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source + + - name: Add msbuild to PATH + uses: microsoft/setup-msbuild@v1.1 + + - name: Build WIN32-MSVC Demo + working-directory: FreeRTOS/Demo/WIN32-MSVC + run: msbuild WIN32.sln -t:rebuild + + - name: Build WIN32-MSVC-Static-Allocation-Only Demo + working-directory: FreeRTOS/Demo/WIN32-MSVC-Static-Allocation-Only + run: msbuild WIN32.sln -t:rebuild + + WIN32-MingW: + name: WIN32 MingW + runs-on: windows-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: 
actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source + + - name: Build WIN32-MingW Demo + working-directory: FreeRTOS/Demo/WIN32-MingW + run: | + gcc --version + make --version + make + + POSIX-GCC: + name: Native GCC + runs-on: ubuntu-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source + + - name: Install GCC + shell: bash + run: | + sudo apt-get -y update + sudo apt-get -y install build-essential + + - name: Build Posix_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/Posix_GCC + run: make -j + + MSP430-GCC: + name: GNU MSP430 Toolchain + runs-on: ubuntu-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source + + - name: Install MSP430 Toolchain + shell: bash + run: | + sudo apt-get -y update + sudo apt-get -y install gcc-msp430 build-essential + + - name: Build msp430_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/msp430_GCC + run: make -j + + ARM-GCC: + name: GNU ARM Toolchain + runs-on: ubuntu-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source + + - name: Install GNU ARM Toolchain + shell: bash + run: | + sudo apt-get -y update + sudo apt-get -y install gcc-arm-none-eabi build-essential cmake git ninja-build python3-minimal + + - name: Build CORTEX_MPU_M3_MPS2_QEMU_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_MPU_M3_MPS2_QEMU_GCC + run: make -j + + - name: Build CORTEX_LM3S102_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_LM3S102_GCC + run: make -j + + - name: Build CORTEX_M3_MPS2_QEMU_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC + run: | + make clean + make -j + + - name: Build CORTEX_M3_MPS2_QEMU_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC + run: | + make clean + make FULL_DEMO=1 -j + + - name: Build CORTEX_LM3S811_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_LM3S811_GCC + run: make -j + + - name: Build CORTEX_M0+_RP2040 Demos + shell: bash + working-directory: FreeRTOS/Demo/ThirdParty/Community-Supported/CORTEX_M0+_RP2040 + run: | + git clone https://github.com/raspberrypi/pico-sdk.git + cmake -B build -DPICO_SDK_PATH=pico-sdk -GNinja + ninja -C build --verbose + + - name: Build CORTEX_MPS2_QEMU_IAR_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_MPS2_QEMU_IAR_GCC + run: make -C build/gcc -j diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index a714b757256..73e1808fa25 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -12,7 +12,9 @@ jobs: 
repository: FreeRTOS/FreeRTOS submodules: 'recursive' fetch-depth: 1 - - name: Clone This Repo + + # Checkout user pull request changes + - name: Checkout Pull Request uses: actions/checkout@v2 with: path: ./FreeRTOS/Source diff --git a/CMakeLists.txt b/CMakeLists.txt index 1d4a7799f7c..d45de64b1b5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -74,6 +74,9 @@ if(NOT FREERTOS_PORT) " GCC_ARM_CM33_SECURE - Compiler: GCC Target: ARM Cortex-M33 secure\n" " GCC_ARM_CM33_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M33 non-trustzone non-secure\n" " GCC_ARM_CM33_TFM - Compiler: GCC Target: ARM Cortex-M33 non-secure for TF-M\n" + " GCC_ARM_CM35P_NONSECURE - Compiler: GCC Target: ARM Cortex-M35P non-secure\n" + " GCC_ARM_CM35P_SECURE - Compiler: GCC Target: ARM Cortex-M35P secure\n" + " GCC_ARM_CM35P_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M35P non-trustzone non-secure\n" " GCC_ARM_CM55_NONSECURE - Compiler: GCC Target: ARM Cortex-M55 non-secure\n" " GCC_ARM_CM55_SECURE - Compiler: GCC Target: ARM Cortex-M55 secure\n" " GCC_ARM_CM55_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M55 non-trustzone non-secure\n" @@ -134,6 +137,9 @@ if(NOT FREERTOS_PORT) " IAR_ARM_CM33_NONSECURE - Compiler: IAR Target: ARM Cortex-M33 non-secure\n" " IAR_ARM_CM33_SECURE - Compiler: IAR Target: ARM Cortex-M33 secure\n" " IAR_ARM_CM33_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M33 non-trustzone non-secure\n" + " IAR_ARM_CM35P_NONSECURE - Compiler: IAR Target: ARM Cortex-M35P non-secure\n" + " IAR_ARM_CM35P_SECURE - Compiler: IAR Target: ARM Cortex-M35P secure\n" + " IAR_ARM_CM35P_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M35P non-trustzone non-secure\n" " IAR_ARM_CM55_NONSECURE - Compiler: IAR Target: ARM Cortex-M55 non-secure\n" " IAR_ARM_CM55_SECURE - Compiler: IAR Target: ARM Cortex-M55 secure\n" " IAR_ARM_CM55_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M55 non-trustzone non-secure\n" @@ -217,11 +223,47 @@ elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_ " target_include_directories(freertos_kernel_port\n" " PUBLIC\n" " .)\n" - " taget_link_libraries(freertos_kernel_port\n" + " target_link_libraries(freertos_kernel_port\n" " PRIVATE\n" " freertos_kernel)") endif() +######################################################################## +# Overall Compile Options +# Note the compile option strategy is to error on everything and then +# Per library opt-out of things that are warnings/errors. +# This ensures that no matter what strategy for compilation you take, the +# builds will still occur. +# +# Only tested with GNU and Clang. +# Other options are https://cmake.org/cmake/help/latest/variable/CMAKE_LANG_COMPILER_ID.html#variable:CMAKE_%3CLANG%3E_COMPILER_ID +# Naming of compilers translation map: +# +# FreeRTOS | CMake +# ------------------- +# CCS | ?TBD? +# GCC | GNU, Clang, *Clang Others? +# IAR | IAR +# Keil | ARMCC +# MSVC | MSVC # Note only for MinGW? +# Renesas | ?TBD? + +add_compile_options( + ### Gnu/Clang C Options + $<$:-fdiagnostics-color=always> + $<$:-fcolor-diagnostics> + + $<$:-Wall> + $<$:-Wextra> + $<$:-Wpedantic> + $<$:-Werror> + $<$:-Weverything> + + # TODO: Add in other Compilers here. 
+) + + +######################################################################## add_subdirectory(portable) add_library(freertos_kernel STATIC @@ -248,3 +290,5 @@ target_link_libraries(freertos_kernel $<$:freertos_config> freertos_kernel_port ) + +######################################################################## diff --git a/README.md b/README.md index 5674872421c..952914daf46 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +[![CMock Unit Tests](https://github.com/FreeRTOS/FreeRTOS-Kernel/actions/workflows/unit-tests.yml/badge.svg?branch=main&event=push)](https://github.com/FreeRTOS/FreeRTOS-Kernel/actions/workflows/unit-tests.yml?query=branch%3Amain+event%3Apush+workflow%3A%22CMock+Unit+Tests%22++) +[![codecov](https://codecov.io/gh/FreeRTOS/FreeRTOS-Kernel/badge.svg?branch=main)](https://codecov.io/gh/FreeRTOS/FreeRTOS-Kernel) ## Getting started This repository contains FreeRTOS kernel source/header files and kernel ports only. This repository is referenced as a submodule in [FreeRTOS/FreeRTOS](https://github.com/FreeRTOS/FreeRTOS) repository, which contains pre-configured demo application projects under ```FreeRTOS/Demo``` directory. @@ -19,7 +21,7 @@ Add the following into your project's main or a subdirectory's `CMakeLists.txt`: ```cmake FetchContent_Declare( freertos_kernel GIT_REPOSITORY https://github.com/FreeRTOS/FreeRTOS-Kernel.git - GIT_TAG master #Note: Best practice to use specific git-hash or tagged version + GIT_TAG main #Note: Best practice to use specific git-hash or tagged version ) ``` diff --git a/event_groups.c b/event_groups.c index 364e9f3214c..3143c220fe5 100644 --- a/event_groups.c +++ b/event_groups.c @@ -49,29 +49,34 @@ /* The following bit fields convey control information in a task's event list * item value. It is important they don't clash with the * taskEVENT_LIST_ITEM_VALUE_IN_USE definition. */ -#if configUSE_16_BIT_TICKS == 1 +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100U #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200U #define eventWAIT_FOR_ALL_BITS 0x0400U #define eventEVENT_BITS_CONTROL_BYTES 0xff00U -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x01000000UL #define eventUNBLOCKED_DUE_TO_BIT_SET 0x02000000UL #define eventWAIT_FOR_ALL_BITS 0x04000000UL #define eventEVENT_BITS_CONTROL_BYTES 0xff000000UL -#endif +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS ) + #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100000000000000ULL + #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200000000000000ULL + #define eventWAIT_FOR_ALL_BITS 0x0400000000000000ULL + #define eventEVENT_BITS_CONTROL_BYTES 0xff00000000000000ULL +#endif /* if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) */ typedef struct EventGroupDef_t { EventBits_t uxEventBits; - List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */ + List_t xTasksWaitingForBits; /**< List of tasks waiting for a bit to be set. */ #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxEventGroupNumber; #endif #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. 
*/ #endif } EventGroup_t; @@ -532,15 +537,15 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) { - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { uxReturn = pxEventBits->uxEventBits; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ @@ -688,6 +693,42 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup ) } /*-----------------------------------------------------------*/ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + StaticEventGroup_t ** ppxEventGroupBuffer ) + { + BaseType_t xReturn; + EventGroup_t * pxEventBits = xEventGroup; + + configASSERT( pxEventBits ); + configASSERT( ppxEventGroupBuffer ); + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Check if the event group was statically allocated. */ + if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdTRUE ) + { + *ppxEventGroupBuffer = ( StaticEventGroup_t * ) pxEventBits; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + #else /* configSUPPORT_DYNAMIC_ALLOCATION */ + { + /* Event group must have been statically allocated. */ + *ppxEventGroupBuffer = ( StaticEventGroup_t * ) pxEventBits; + xReturn = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + return xReturn; + } +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + /* For internal use only - execute a 'set bits' command that was pended from * an interrupt. */ void vEventGroupSetBitsCallback( void * pvEventGroup, diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 47cf0a47a11..809f5a9c0bf 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -55,9 +55,32 @@ #endif /* *INDENT-ON* */ +/* Acceptable values for configTICK_TYPE_WIDTH_IN_BITS. */ +#define TICK_TYPE_WIDTH_16_BITS 0 +#define TICK_TYPE_WIDTH_32_BITS 1 +#define TICK_TYPE_WIDTH_64_BITS 2 + /* Application specific configuration options. */ #include "FreeRTOSConfig.h" +#if !defined( configUSE_16_BIT_TICKS ) && !defined( configTICK_TYPE_WIDTH_IN_BITS ) + #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#if defined( configUSE_16_BIT_TICKS ) && defined( configTICK_TYPE_WIDTH_IN_BITS ) + #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +/* Define configTICK_TYPE_WIDTH_IN_BITS according to the + * value of configUSE_16_BIT_TICKS for backward compatibility. */ +#ifndef configTICK_TYPE_WIDTH_IN_BITS + #if ( configUSE_16_BIT_TICKS == 1 ) + #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_16_BITS + #else + #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_32_BITS + #endif +#endif + /* Basic FreeRTOS definitions. */ #include "projdefs.h" @@ -72,41 +95,26 @@ /* Required if struct _reent is used. 
*/ #if ( configUSE_NEWLIB_REENTRANT == 1 ) -/* Note Newlib support has been included by popular demand, but is not - * used by the FreeRTOS maintainers themselves. FreeRTOS is not - * responsible for resulting newlib operation. User must be familiar with - * newlib and must provide system-wide implementations of the necessary - * stubs. Be warned that (at the time of writing) the current newlib design - * implements a system-wide malloc() that must be provided with locks. - * - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - #include + #include "newlib-freertos.h" - #define configUSE_C_RUNTIME_TLS_SUPPORT 1 +#endif /* if ( configUSE_NEWLIB_REENTRANT == 1 ) */ - #ifndef configTLS_BLOCK_TYPE - #define configTLS_BLOCK_TYPE struct _reent - #endif +/* Must be defaulted before configUSE_PICOLIBC_TLS is used below. */ +#ifndef configUSE_PICOLIBC_TLS + #define configUSE_PICOLIBC_TLS 0 +#endif - #ifndef configINIT_TLS_BLOCK - #define configINIT_TLS_BLOCK( xTLSBlock ) _REENT_INIT_PTR( &( xTLSBlock ) ) - #endif +#if ( configUSE_PICOLIBC_TLS == 1 ) - #ifndef configSET_TLS_BLOCK - #define configSET_TLS_BLOCK( xTLSBlock ) ( _impure_ptr = &( xTLSBlock ) ) - #endif + #include "picolibc-freertos.h" - #ifndef configDEINIT_TLS_BLOCK - #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) ) - #endif -#endif /* if ( configUSE_NEWLIB_REENTRANT == 1 ) */ +#endif /* if ( configUSE_PICOLIBC_TLS == 1 ) */ #ifndef configUSE_C_RUNTIME_TLS_SUPPORT #define configUSE_C_RUNTIME_TLS_SUPPORT 0 #endif -#if ( ( configUSE_NEWLIB_REENTRANT == 0 ) && ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) +#if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) #ifndef configTLS_BLOCK_TYPE #error Missing definition: configTLS_BLOCK_TYPE must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1. @@ -123,7 +131,7 @@ #ifndef configDEINIT_TLS_BLOCK #error Missing definition: configDEINIT_TLS_BLOCK must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1. #endif -#endif /* if ( ( configUSE_NEWLIB_REENTRANT == 0 ) && ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) */ +#endif /* if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) */ /* * Check all the required application specific macros have been defined. @@ -161,8 +169,10 @@ #error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. #endif -#ifndef configUSE_16_BIT_TICKS - #error Missing definition: configUSE_16_BIT_TICKS must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. +#if ( ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_16_BITS ) && \ + ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_32_BITS ) && \ + ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_64_BITS ) ) + #error Macro configTICK_TYPE_WIDTH_IN_BITS is defined to incorrect value. See the Configuration section of the FreeRTOS API documentation for details. 
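
For reference, the new tick-width setting introduced above is selected from FreeRTOSConfig.h in place of the legacy configUSE_16_BIT_TICKS option; the fragment below is an illustrative sketch only (exactly one of the two macros may be defined, otherwise the #error checks above fire).

/* FreeRTOSConfig.h (illustrative fragment) - choose the TickType_t width
 * explicitly.  Do not also define configUSE_16_BIT_TICKS, or FreeRTOS.h
 * reports the "Only one of ..." error shown above. */
#define configTICK_TYPE_WIDTH_IN_BITS    TICK_TYPE_WIDTH_64_BITS    /* TICK_TYPE_WIDTH_16_BITS, _32_BITS or _64_BITS. */
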
#endif #ifndef INCLUDE_vTaskPrioritySet @@ -1001,6 +1011,10 @@ #define portDONT_DISCARD #endif +#ifndef portNORETURN + #define portNORETURN +#endif + #ifndef configUSE_TIME_SLICING #define configUSE_TIME_SLICING 1 #endif @@ -1420,7 +1434,7 @@ typedef struct xSTATIC_TCB #if ( configGENERATE_RUN_TIME_STATS == 1 ) configRUN_TIME_COUNTER_TYPE ulDummy16; #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) configTLS_BLOCK_TYPE xDummy17; #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) diff --git a/include/event_groups.h b/include/event_groups.h index 949ddd9143f..47572ce94e4 100644 --- a/include/event_groups.h +++ b/include/event_groups.h @@ -84,8 +84,8 @@ typedef struct EventGroupDef_t * EventGroupHandle_t; /* * The type that holds event bits always matches TickType_t - therefore the - * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1, - * 32 bits if set to 0. + * number of bits it holds is set by configTICK_TYPE_WIDTH_IN_BITS (16 bits if set to 0, + * 32 bits if set to 1, 64 bits if set to 2. * * \defgroup EventBits_t EventBits_t * \ingroup EventGroup @@ -112,11 +112,12 @@ typedef TickType_t EventBits_t; * * Although event groups are not related to ticks, for internal implementation * reasons the number of bits available for use in an event group is dependent - * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If - * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit - * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has - * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store - * event bits within an event group. + * on the configTICK_TYPE_WIDTH_IN_BITS setting in FreeRTOSConfig.h. If + * configTICK_TYPE_WIDTH_IN_BITS is 0 then each event group contains 8 usable bits (bit + * 0 to bit 7). If configTICK_TYPE_WIDTH_IN_BITS is set to 1 then each event group has + * 24 usable bits (bit 0 to bit 23). If configTICK_TYPE_WIDTH_IN_BITS is set to 2 then + * each event group has 56 usable bits (bit 0 to bit 53). The EventBits_t type + * is used to store event bits within an event group. * * @return If the event group was created then a handle to the event group is * returned. If there was insufficient FreeRTOS heap available to create the @@ -168,11 +169,12 @@ typedef TickType_t EventBits_t; * * Although event groups are not related to ticks, for internal implementation * reasons the number of bits available for use in an event group is dependent - * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If - * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit - * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has - * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store - * event bits within an event group. + * on the configTICK_TYPE_WIDTH_IN_BITS setting in FreeRTOSConfig.h. If + * configTICK_TYPE_WIDTH_IN_BITS is 0 then each event group contains 8 usable bits (bit + * 0 to bit 7). If configTICK_TYPE_WIDTH_IN_BITS is set to 1 then each event group has + * 24 usable bits (bit 0 to bit 23). If configTICK_TYPE_WIDTH_IN_BITS is set to 2 then + * each event group has 56 usable bits (bit 0 to bit 53). The EventBits_t type + * is used to store event bits within an event group. 
* * @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type * StaticEventGroup_t, which will be then be used to hold the event group's data @@ -761,6 +763,28 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEG */ void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; +/** + * event_groups.h + * @code{c} + * BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + * StaticEventGroup_t ** ppxEventGroupBuffer ); + * @endcode + * + * Retrieve a pointer to a statically created event groups's data structure + * buffer. It is the same buffer that is supplied at the time of creation. + * + * @param xEventGroup The event group for which to retrieve the buffer. + * + * @param ppxEventGroupBuffer Used to return a pointer to the event groups's + * data structure buffer. + * + * @return pdTRUE if the buffer was retrieved, pdFALSE otherwise. + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + StaticEventGroup_t ** ppxEventGroupBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /* For internal use only. */ void vEventGroupSetBitsCallback( void * pvEventGroup, const uint32_t ulBitsToSet ) PRIVILEGED_FUNCTION; diff --git a/include/list.h b/include/list.h index 11241b68687..c86eb7165f8 100644 --- a/include/list.h +++ b/include/list.h @@ -143,20 +143,20 @@ struct xLIST; struct xLIST_ITEM { - listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ - configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in ascending order. */ - struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */ - struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */ - void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ - struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */ - listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + configLIST_VOLATILE TickType_t xItemValue; /**< The value being listed. In most cases this is used to sort the list in ascending order. */ + struct xLIST_ITEM * configLIST_VOLATILE pxNext; /**< Pointer to the next ListItem_t in the list. */ + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /**< Pointer to the previous ListItem_t in the list. */ + void * pvOwner; /**< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ + struct xLIST * configLIST_VOLATILE pxContainer; /**< Pointer to the list in which this list item is placed (if any). */ + listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ }; typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. 
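
To make the intended use of the xEventGroupGetStaticBuffer() accessor above concrete, a minimal usage sketch follows; the storage variable and function names are hypothetical and configSUPPORT_STATIC_ALLOCATION is assumed to be 1.

/* Illustrative sketch: query the buffer of a statically created event group. */
#include "FreeRTOS.h"
#include "event_groups.h"

static StaticEventGroup_t xEventGroupStorage;

void vExampleQueryEventGroupBuffer( void )
{
    EventGroupHandle_t xEventGroup = xEventGroupCreateStatic( &xEventGroupStorage );
    StaticEventGroup_t * pxRetrievedBuffer = NULL;

    /* Returns pdTRUE because the event group was created statically - the
     * pointer returned is the buffer supplied at creation time. */
    if( xEventGroupGetStaticBuffer( xEventGroup, &pxRetrievedBuffer ) == pdTRUE )
    {
        configASSERT( pxRetrievedBuffer == &xEventGroupStorage );
    }
}
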
*/ #if ( configUSE_MINI_LIST_ITEM == 1 ) struct xMINI_LIST_ITEM { - listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ configLIST_VOLATILE TickType_t xItemValue; struct xLIST_ITEM * configLIST_VOLATILE pxNext; struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; @@ -171,11 +171,11 @@ typedef struct xLIST_ITEM ListItem_t; /* For some reason lint */ typedef struct xLIST { - listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ volatile UBaseType_t uxNumberOfItems; - ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ - MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ - listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + ListItem_t * configLIST_VOLATILE pxIndex; /**< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ + MiniListItem_t xListEnd; /**< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ + listSECOND_LIST_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ } List_t; /* @@ -283,17 +283,17 @@ typedef struct xLIST * \ingroup LinkedList */ #define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \ - { \ + do { \ List_t * const pxConstList = ( pxList ); \ /* Increment the index to the next item and return the item, ensuring */ \ /* we don't return the marker used at the end of the list. */ \ ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \ { \ - ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + ( pxConstList )->pxIndex = ( pxConstList )->xListEnd.pxNext; \ } \ ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ - } + } while( 0 ) /* * Version of uxListRemove() that does not return a value. Provided as a slight @@ -312,7 +312,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listREMOVE_ITEM( pxItemToRemove ) \ - { \ + do { \ /* The list item knows which list it is in. Obtain the list from the list \ * item. */ \ List_t * const pxList = ( pxItemToRemove )->pxContainer; \ @@ -327,7 +327,7 @@ typedef struct xLIST \ ( pxItemToRemove )->pxContainer = NULL; \ ( pxList->uxNumberOfItems )--; \ - } + } while( 0 ) /* * Inline version of vListInsertEnd() to provide slight optimisation for @@ -352,7 +352,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listINSERT_END( pxList, pxNewListItem ) \ - { \ + do { \ ListItem_t * const pxIndex = ( pxList )->pxIndex; \ \ /* Only effective when configASSERT() is also defined, these tests may catch \ @@ -374,7 +374,7 @@ typedef struct xLIST ( pxNewListItem )->pxContainer = ( pxList ); \ \ ( ( pxList )->uxNumberOfItems )++; \ - } + } while( 0 ) /* * Access function to obtain the owner of the first entry in a list. 
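
The do { } while( 0 ) wrappers added around the list macros above follow the standard C idiom for multi-statement macros: a bare { } block plus the caller's trailing semicolon breaks if/else constructs, whereas the wrapped form expands to a single statement. A hypothetical illustration (the macro names are invented for this sketch):

/* Illustrative sketch of the do { } while( 0 ) macro idiom. */
#define BARE_BLOCK( x )       { ( x )++; }
#define WRAPPED_BLOCK( x )    do { ( x )++; } while( 0 )

void vExampleMacroIdiom( int xCondition, int xValue )
{
    if( xCondition )
        WRAPPED_BLOCK( xValue );    /* Expands to one statement - the 'else' below still binds. */
    else
        xValue = 0;

    /* Using BARE_BLOCK( xValue ); in the same position would leave a stray ';'
     * after the closing '}', terminating the 'if' and making the 'else' a
     * syntax error. */
    ( void ) xValue;
}
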
Lists diff --git a/include/message_buffer.h b/include/message_buffer.h index b56dd35964e..74fab118f51 100644 --- a/include/message_buffer.h +++ b/include/message_buffer.h @@ -245,6 +245,37 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; xStreamBufferGenericCreateStatic( ( xBufferSizeBytes ), 0, pdTRUE, ( pucMessageBufferStorageArea ), ( pxStaticMessageBuffer ), ( pxSendCompletedCallback ), ( pxReceiveCompletedCallback ) ) #endif +/** + * message_buffer.h + * + * @code{c} + * BaseType_t xMessageBufferGetStaticBuffers( MessageBufferHandle_t xMessageBuffer, + * uint8_t ** ppucMessageBufferStorageArea, + * StaticMessageBuffer_t ** ppxStaticMessageBuffer ); + * @endcode + * + * Retrieve pointers to a statically created message buffer's data structure + * buffer and storage area buffer. These are the same buffers that are supplied + * at the time of creation. + * + * @param xMessageBuffer The message buffer for which to retrieve the buffers. + * + * @param ppucMessageBufferStorageArea Used to return a pointer to the + * message buffer's storage area buffer. + * + * @param ppxStaticMessageBuffer Used to return a pointer to the message + * buffer's data structure buffer. + * + * @return pdTRUE if buffers were retrieved, pdFALSE otherwise.. + * + * \defgroup xMessageBufferGetStaticBuffers xMessageBufferGetStaticBuffers + * \ingroup MessageBufferManagement + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xMessageBufferGetStaticBuffers( xMessageBuffer, ppucMessageBufferStorageArea, ppxStaticMessageBuffer ) \ + xStreamBufferGetStaticBuffers( ( xMessageBuffer ), ( ppucMessageBufferStorageArea ), ( ppxStaticMessageBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /** * message_buffer.h * diff --git a/include/newlib-freertos.h b/include/newlib-freertos.h new file mode 100644 index 00000000000..497ca529990 --- /dev/null +++ b/include/newlib-freertos.h @@ -0,0 +1,62 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef INC_NEWLIB_FREERTOS_H +#define INC_NEWLIB_FREERTOS_H + +/* Note Newlib support has been included by popular demand, but is not + * used by the FreeRTOS maintainers themselves. FreeRTOS is not + * responsible for resulting newlib operation. 
User must be familiar with + * newlib and must provide system-wide implementations of the necessary + * stubs. Be warned that (at the time of writing) the current newlib design + * implements a system-wide malloc() that must be provided with locks. + * + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. */ + +#include + +#define configUSE_C_RUNTIME_TLS_SUPPORT 1 + +#ifndef configTLS_BLOCK_TYPE + #define configTLS_BLOCK_TYPE struct _reent +#endif + +#ifndef configINIT_TLS_BLOCK + #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) _REENT_INIT_PTR( &( xTLSBlock ) ) +#endif + +#ifndef configSET_TLS_BLOCK + #define configSET_TLS_BLOCK( xTLSBlock ) _impure_ptr = &( xTLSBlock ) +#endif + +#ifndef configDEINIT_TLS_BLOCK + #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) ) +#endif + +#endif /* INC_NEWLIB_FREERTOS_H */ diff --git a/include/picolibc-freertos.h b/include/picolibc-freertos.h new file mode 100644 index 00000000000..467f7a97091 --- /dev/null +++ b/include/picolibc-freertos.h @@ -0,0 +1,90 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef INC_PICOLIBC_FREERTOS_H +#define INC_PICOLIBC_FREERTOS_H + +/* Use picolibc TLS support to allocate space for __thread variables, + * initialize them at thread creation and set the TLS context at + * thread switch time. + * + * See the picolibc TLS docs: + * https://github.com/picolibc/picolibc/blob/main/doc/tls.md + * for additional information. */ + +#include + +#define configUSE_C_RUNTIME_TLS_SUPPORT 1 + +#define configTLS_BLOCK_TYPE void * + +#define picolibcTLS_SIZE ( ( portPOINTER_SIZE_TYPE ) _tls_size() ) +#define picolibcSTACK_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) + +#if __PICOLIBC_MAJOR__ > 1 || __PICOLIBC_MINOR__ >= 8 + +/* Picolibc 1.8 and newer have explicit alignment values provided + * by the _tls_align() inline */ + #define picolibcTLS_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) ( _tls_align() - 1 ) ) +#else + +/* For older Picolibc versions, use the general port alignment value */ + #define picolibcTLS_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) +#endif + +/* Allocate thread local storage block off the end of the +* stack. 
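
Together with the FreeRTOS.h changes earlier in this patch, the C runtime TLS integration is selected from FreeRTOSConfig.h; the fragment below is an illustrative sketch only.

/* FreeRTOSConfig.h (illustrative fragment) - enable at most one C runtime
 * TLS integration.  configUSE_NEWLIB_REENTRANT pulls in newlib-freertos.h
 * (configTLS_BLOCK_TYPE defaults to struct _reent); configUSE_PICOLIBC_TLS
 * pulls in picolibc-freertos.h, which reserves the TLS block from each
 * task's stack via the configINIT_TLS_BLOCK() macro defined in that header. */
#define configUSE_NEWLIB_REENTRANT    0
#define configUSE_PICOLIBC_TLS        1
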
The _tls_size() function returns the size (in +* bytes) of the total TLS area used by the application */ +#if ( portSTACK_GROWTH < 0 ) + + #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \ + do { \ + pxTopOfStack = ( StackType_t * ) ( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) \ + - picolibcTLS_SIZE ) & ~ \ + configMAX( picolibcSTACK_ALIGNMENT_MASK, \ + picolibcTLS_ALIGNMENT_MASK ) ); \ + xTLSBlock = pxTopOfStack; \ + _init_tls( xTLSBlock ); \ + } while( 0 ) +#else /* portSTACK_GROWTH */ + #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \ + do { \ + xTLSBlock = ( void * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack + \ + picolibcTLS_ALIGNMENT_MASK ) & ~picolibcTLS_ALIGNMENT_MASK ); \ + pxTopOfStack = ( StackType_t * ) ( ( ( ( ( portPOINTER_SIZE_TYPE ) xTLSBlock ) + \ + picolibcTLS_SIZE ) + picolibcSTACK_ALIGNMENT_MASK ) & \ + ~picolibcSTACK_ALIGNMENT_MASK ); \ + _init_tls( xTLSBlock ); \ + } while( 0 ) +#endif /* portSTACK_GROWTH */ + +#define configSET_TLS_BLOCK( xTLSBlock ) _set_tls( xTLSBlock ) + +#define configDEINIT_TLS_BLOCK( xTLSBlock ) + +#endif /* INC_PICOLIBC_FREERTOS_H */ diff --git a/include/projdefs.h b/include/projdefs.h index b4b8c14ff79..c81ad568436 100644 --- a/include/projdefs.h +++ b/include/projdefs.h @@ -44,6 +44,10 @@ typedef void (* TaskFunction_t)( void * ); #define pdFALSE ( ( BaseType_t ) 0 ) #define pdTRUE ( ( BaseType_t ) 1 ) +#define pdFALSE_SIGNED ( ( BaseType_t ) 0 ) +#define pdTRUE_SIGNED ( ( BaseType_t ) 1 ) +#define pdFALSE_UNSIGNED ( ( UBaseType_t ) 0 ) +#define pdTRUE_UNSIGNED ( ( UBaseType_t ) 1 ) #define pdPASS ( pdTRUE ) #define pdFAIL ( pdFALSE ) @@ -60,10 +64,14 @@ typedef void (* TaskFunction_t)( void * ); #define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0 #endif -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) #define pdINTEGRITY_CHECK_VALUE 0x5a5a -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS ) + #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5a5a5a5a5aULL +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /* The following errno values are used by FreeRTOS+ components, not FreeRTOS @@ -96,6 +104,7 @@ typedef void (* TaskFunction_t)( void * ); #define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */ #define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */ #define pdFREERTOS_ERRNO_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define pdFREERTOS_ERRNO_EAFNOSUPPORT 97 /* Address family not supported by protocol */ #define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */ #define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */ #define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */ diff --git a/include/queue.h b/include/queue.h index 02356766abb..66c8286aef0 100644 --- a/include/queue.h +++ b/include/queue.h @@ -235,6 +235,35 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t; #define xQueueCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer ) xQueueGenericCreateStatic( ( uxQueueLength ), ( uxItemSize ), ( pucQueueStorage ), ( pxQueueBuffer ), ( queueQUEUE_TYPE_BASE ) ) #endif /* configSUPPORT_STATIC_ALLOCATION */ +/** + * queue. 
h + * @code{c} + * BaseType_t xQueueGetStaticBuffers( QueueHandle_t xQueue, + * uint8_t ** ppucQueueStorage, + * StaticQueue_t ** ppxStaticQueue ); + * @endcode + * + * Retrieve pointers to a statically created queue's data structure buffer + * and storage area buffer. These are the same buffers that are supplied + * at the time of creation. + * + * @param xQueue The queue for which to retrieve the buffers. + * + * @param ppucQueueStorage Used to return a pointer to the queue's storage + * area buffer. + * + * @param ppxStaticQueue Used to return a pointer to the queue's data + * structure buffer. + * + * @return pdTRUE if buffers were retrieved, pdFALSE otherwise. + * + * \defgroup xQueueGetStaticBuffers xQueueGetStaticBuffers + * \ingroup QueueManagement + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xQueueGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue ) xQueueGenericGetStaticBuffers( ( xQueue ), ( ppucQueueStorage ), ( ppxStaticQueue ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /** * queue. h * @code{c} @@ -1346,9 +1375,9 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * @param pvBuffer Pointer to the buffer into which the received item will * be copied. * - * @param pxTaskWoken A task may be blocked waiting for space to become - * available on the queue. If xQueueReceiveFromISR causes such a task to - * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will + * @param pxHigherPriorityTaskWoken A task may be blocked waiting for space to + * become available on the queue. If xQueueReceiveFromISR causes such a task + * to unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will * remain unchanged. * * @return pdTRUE if an item was successfully received from the queue, @@ -1542,6 +1571,18 @@ BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION; const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; #endif +/* + * Generic version of the function used to retrieve the buffers of statically + * created queues. This is called by other functions and macros that retrieve + * the buffers of other statically created RTOS objects that use the queue + * structure as their base. + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue, + uint8_t ** ppucQueueStorage, + StaticQueue_t ** ppxStaticQueue ) PRIVILEGED_FUNCTION; +#endif + /* * Queue sets provide a mechanism to allow a task to block (pend) on a read * operation from multiple queues or semaphores simultaneously. diff --git a/include/semphr.h b/include/semphr.h index c2206fa98ff..7977a01b84e 100644 --- a/include/semphr.h +++ b/include/semphr.h @@ -1190,4 +1190,25 @@ typedef QueueHandle_t SemaphoreHandle_t; */ #define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) ) +/** + * semphr.h + * @code{c} + * BaseType_t xSemaphoreGetStaticBuffer( SemaphoreHandle_t xSemaphore ); + * @endcode + * + * Retrieve pointer to a statically created binary semaphore, counting semaphore, + * or mutex semaphore's data structure buffer. This is the same buffer that is + * supplied at the time of creation. + * + * @param xSemaphore The semaphore for which to retrieve the buffer. + * + * @param ppxSemaphoreBuffer Used to return a pointer to the semaphore's + * data structure buffer. + * + * @return pdTRUE if buffer was retrieved, pdFALSE otherwise. 
+ */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreGetStaticBuffer( xSemaphore, ppxSemaphoreBuffer ) xQueueGenericGetStaticBuffers( ( QueueHandle_t ) ( xSemaphore ), NULL, ( ppxSemaphoreBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + #endif /* SEMAPHORE_H */ diff --git a/include/stack_macros.h b/include/stack_macros.h index 9b36959baea..7ffc7b34338 100644 --- a/include/stack_macros.h +++ b/include/stack_macros.h @@ -87,7 +87,7 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ \ @@ -98,7 +98,7 @@ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while( 0 ) #endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ /*-----------------------------------------------------------*/ diff --git a/include/stream_buffer.h b/include/stream_buffer.h index d65ed9e9cec..521c178ef5a 100644 --- a/include/stream_buffer.h +++ b/include/stream_buffer.h @@ -260,6 +260,38 @@ typedef void (* StreamBufferCallbackFunction_t)( StreamBufferHandle_t xStreamBuf xStreamBufferGenericCreateStatic( ( xBufferSizeBytes ), ( xTriggerLevelBytes ), pdFALSE, ( pucStreamBufferStorageArea ), ( pxStaticStreamBuffer ), ( pxSendCompletedCallback ), ( pxReceiveCompletedCallback ) ) #endif +/** + * stream_buffer.h + * + * @code{c} + * BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer, + * uint8_t ** ppucStreamBufferStorageArea, + * StaticStreamBuffer_t ** ppxStaticStreamBuffer ); + * @endcode + * + * Retrieve pointers to a statically created stream buffer's data structure + * buffer and storage area buffer. These are the same buffers that are supplied + * at the time of creation. + * + * @param xStreamBuffer The stream buffer for which to retrieve the buffers. + * + * @param ppucStreamBufferStorageArea Used to return a pointer to the stream + * buffer's storage area buffer. + * + * @param ppxStaticStreamBuffer Used to return a pointer to the stream + * buffer's data structure buffer. + * + * @return pdTRUE if buffers were retrieved, pdFALSE otherwise. + * + * \defgroup xStreamBufferGetStaticBuffers xStreamBufferGetStaticBuffers + * \ingroup StreamBufferManagement + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer, + uint8_t ** ppucStreamBufferStorageArea, + StaticStreamBuffer_t ** ppxStaticStreamBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /** * stream_buffer.h * diff --git a/include/task.h b/include/task.h index 458c46503d9..8745a1d6481 100644 --- a/include/task.h +++ b/include/task.h @@ -711,7 +711,7 @@ typedef enum * * @param xTask The handle of the task being updated. * - * @param xRegions A pointer to a MemoryRegion_t structure that contains the + * @param[in] pxRegions A pointer to a MemoryRegion_t structure that contains the * new memory region definitions. * * Example usage: @@ -1720,6 +1720,36 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e */ TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +/** + * task. 
h + * @code{c} + * BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask, + * StackType_t ** ppuxStackBuffer, + * StaticTask_t ** ppxTaskBuffer ); + * @endcode + * + * Retrieve pointers to a statically created task's data structure + * buffer and stack buffer. These are the same buffers that are supplied + * at the time of creation. + * + * @param xTask The task for which to retrieve the buffers. + * + * @param ppuxStackBuffer Used to return a pointer to the task's stack buffer. + * + * @param ppxTaskBuffer Used to return a pointer to the task's data structure + * buffer. + * + * @return pdTRUE if buffers were retrieved, pdFALSE otherwise. + * + * \defgroup xTaskGetStaticBuffers xTaskGetStaticBuffers + * \ingroup TaskUtils + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask, + StackType_t ** ppuxStackBuffer, + StaticTask_t ** ppxTaskBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /** * task.h * @code{c} @@ -1845,7 +1875,7 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL /** * task.h * @code{c} - * void vApplicationStackOverflowHook( TaskHandle_t xTask char *pcTaskName); + * void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName); * @endcode * * The application stack overflow hook is called when a stack overflow is detected for a task. @@ -1860,7 +1890,25 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL #endif -#if ( configUSE_TICK_HOOK > 0 ) +#if ( configUSE_IDLE_HOOK == 1 ) + +/** + * task.h + * @code{c} + * void vApplicationIdleHook( void ); + * @endcode + * + * The application idle hook is called by the idle task. + * This allows the application designer to add background functionality without + * the overhead of a separate task. + * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, CALL A FUNCTION THAT MIGHT BLOCK. + */ + void vApplicationIdleHook( void ); + +#endif + + +#if ( configUSE_TICK_HOOK != 0 ) /** * task.h @@ -2127,6 +2175,42 @@ void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unquali */ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +/** + * task. h + * @code{c} + * configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask ); + * configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask ); + * @endcode + * + * configGENERATE_RUN_TIME_STATS must be defined as 1 for these functions to be + * available. The application must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and + * portGET_RUN_TIME_COUNTER_VALUE() to configure a peripheral timer/counter and + * return the timers current count value respectively. The counter should be + * at least 10 times the frequency of the tick count. + * + * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total + * accumulated execution time being stored for each task. The resolution + * of the accumulated time value depends on the frequency of the timer + * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. + * While uxTaskGetSystemState() and vTaskGetRunTimeStats() writes the total + * execution time of each task into a buffer, ulTaskGetRunTimeCounter() + * returns the total execution time of just one task and + * ulTaskGetRunTimePercent() returns the percentage of the CPU time used by + * just one task. 
+ * + * @return The total run time of the given task or the percentage of the total + * run time consumed by the given task. This is the amount of time the task + * has actually been executing. The unit of time is dependent on the frequency + * configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and + * portGET_RUN_TIME_COUNTER_VALUE() macros. + * + * \defgroup ulTaskGetRunTimeCounter ulTaskGetRunTimeCounter + * \ingroup TaskUtils + */ +configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; +configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + /** * task. h * @code{c} @@ -2134,13 +2218,12 @@ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e * configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ); * @endcode * - * configGENERATE_RUN_TIME_STATS, configUSE_STATS_FORMATTING_FUNCTIONS and - * INCLUDE_xTaskGetIdleTaskHandle must all be defined as 1 for these functions - * to be available. The application must also then provide definitions for - * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() - * to configure a peripheral timer/counter and return the timers current count - * value respectively. The counter should be at least 10 times the frequency of - * the tick count. + * configGENERATE_RUN_TIME_STATS must be defined as 1 for these functions to be + * available. The application must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and + * portGET_RUN_TIME_COUNTER_VALUE() to configure a peripheral timer/counter and + * return the timers current count value respectively. The counter should be + * at least 10 times the frequency of the tick count. * * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total * accumulated execution time being stored for each task. The resolution diff --git a/include/timers.h b/include/timers.h index 6bbb9f687de..054ec097fab 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1323,6 +1323,26 @@ TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; */ TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; +/** + * BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer, + * StaticTimer_t ** ppxTimerBuffer ); + * + * Retrieve pointer to a statically created timer's data structure + * buffer. This is the same buffer that is supplied at the time of + * creation. + * + * @param xTimer The timer for which to retrieve the buffer. + * + * @param ppxTimerBuffer Used to return a pointer to the timer's data + * structure buffer. + * + * @return pdTRUE if the buffer was retrieved, pdFALSE otherwise. + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer, + StaticTimer_t ** ppxTimerBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /* * Functions beyond this part are not part of the public API and are intended * for use by the kernel only. @@ -1378,6 +1398,20 @@ BaseType_t xTimerGenericCommandFromISR( TimerHandle_t xTimer, #endif +#if ( configUSE_DAEMON_TASK_STARTUP_HOOK != 0 ) + +/** + * timers.h + * @code{c} + * void vApplicationDaemonTaskStartupHook( void ); + * @endcode + * + * This hook function is called from the timer task once when the task starts running.
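
A short sketch of the new run-time statistics accessors and xTimerGetStaticBuffer() follows; the handles are assumed to exist already, configGENERATE_RUN_TIME_STATS and configSUPPORT_STATIC_ALLOCATION are assumed to be 1, and the timer is assumed to have been created with xTimerCreateStatic().

/* Illustrative sketch: per-task run time and static timer buffer retrieval. */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"

void vExampleRunTimeAndTimerBuffer( TaskHandle_t xTask,
                                    TimerHandle_t xStaticallyCreatedTimer )
{
    configRUN_TIME_COUNTER_TYPE ulRunTime, ulPercent;
    StaticTimer_t * pxTimerBuffer = NULL;

    /* Total accumulated run time of one task, and its share of the CPU time. */
    ulRunTime = ulTaskGetRunTimeCounter( xTask );
    ulPercent = ulTaskGetRunTimePercent( xTask );
    ( void ) ulRunTime;
    ( void ) ulPercent;

    /* Returns the StaticTimer_t buffer supplied when the timer was created. */
    if( xTimerGetStaticBuffer( xStaticallyCreatedTimer, &pxTimerBuffer ) == pdTRUE )
    {
        configASSERT( pxTimerBuffer != NULL );
    }
}
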
+ */ + void vApplicationDaemonTaskStartupHook( void ); + +#endif + /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/portable/ARMv8M/copy_files.py b/portable/ARMv8M/copy_files.py index ebe8009703c..d064969809c 100644 --- a/portable/ARMv8M/copy_files.py +++ b/portable/ARMv8M/copy_files.py @@ -33,8 +33,8 @@ _FREERTOS_PORTABLE_DIRECTORY_ = os.path.dirname(_THIS_FILE_DIRECTORY_) _COMPILERS_ = ['GCC', 'IAR'] -_ARCH_NS_ = ['ARM_CM85', 'ARM_CM85_NTZ', 'ARM_CM55', 'ARM_CM55_NTZ', 'ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] -_ARCH_S_ = ['ARM_CM85', 'ARM_CM55', 'ARM_CM33', 'ARM_CM23'] +_ARCH_NS_ = ['ARM_CM85', 'ARM_CM85_NTZ', 'ARM_CM55', 'ARM_CM55_NTZ', 'ARM_CM35P', 'ARM_CM35P_NTZ', 'ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] +_ARCH_S_ = ['ARM_CM85', 'ARM_CM55', 'ARM_CM35P', 'ARM_CM33', 'ARM_CM23'] # Files to be compiled in the Secure Project _SECURE_COMMON_FILE_PATHS_ = [ @@ -46,16 +46,18 @@ _SECURE_PORTABLE_FILE_PATHS_ = { 'GCC':{ - 'ARM_CM23':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM23')], - 'ARM_CM33':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], - 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], - 'ARM_CM85':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')] + 'ARM_CM23' :[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM23')], + 'ARM_CM33' :[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM35P':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM55' :[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM85' :[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')] }, 'IAR':{ - 'ARM_CM23':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM23')], - 'ARM_CM33':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], - 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], - 'ARM_CM85':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')] + 'ARM_CM23' :[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM23')], + 'ARM_CM33' :[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM35P':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM55' :[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM85' :[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')] } } @@ -70,6 +72,10 @@ 'ARM_CM23_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM23_NTZ')], 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33')], 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ')], + 'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')], + 'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), @@ -84,6 +90,10 @@ 'ARM_CM23_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM23_NTZ')], 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33')], 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ')], 
+ 'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')], + 'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index f98b8f277cb..c6dad99857c 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index f98b8f277cb..c6dad99857c 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 943c665cc43..4fe8c59147a 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 943c665cc43..4fe8c59147a 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h new file mode 100644 index 00000000000..33bfb283461 --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h @@ -0,0 +1,67 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. 
+ */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index b654748e138..adb47d8420f 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index 830fa2c1379..fec6923394c 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h new file mode 100644 index 00000000000..a0efc1f9dcf --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h @@ -0,0 +1,78 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. 
+ */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/ARMv8M/non_secure/portmacrocommon.h +++ b/portable/ARMv8M/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/BCC/16BitDOS/Flsh186/prtmacro.h b/portable/BCC/16BitDOS/Flsh186/prtmacro.h index 5aaebaa4d8b..295c0bc736e 100644 --- a/portable/BCC/16BitDOS/Flsh186/prtmacro.h +++ b/portable/BCC/16BitDOS/Flsh186/prtmacro.h @@ -52,12 +52,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/BCC/16BitDOS/PC/prtmacro.h b/portable/BCC/16BitDOS/PC/prtmacro.h index 1d26c5bf416..5fb4ed6a497 100644 --- a/portable/BCC/16BitDOS/PC/prtmacro.h +++ b/portable/BCC/16BitDOS/PC/prtmacro.h @@ -52,12 +52,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c old mode 100644 new mode 100755 index 80e0b0a2410..ef5fa5b9340 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -52,8 +52,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -217,7 +218,8 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -227,7 +229,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -239,22 +241,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. 
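+             *
+             * As a worked illustration (values assumed for the example): with
+             * 8 priority bits and the least significant bit acting as the
+             * sub-priority, a configMAX_SYSCALL_INTERRUPT_PRIORITY of 5
+             * (0b00000101) shares the preemption priority 0b0000010x with
+             * priority 4 (0b00000100), so both are masked in critical
+             * sections. A value of 4 keeps the sub-priority bit clear and
+             * avoids the ambiguity.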
+ * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -263,7 +299,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -274,7 +310,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ @@ -370,9 +406,9 @@ void xPortSysTickHandler( void ) /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ - __asm( " cpsid i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsid i" ); + __asm( " dsb" ); + __asm( " isb" ); /* If a context switch is pending or a task is waiting for the scheduler * to be unsuspended then abandon the low power entry. */ @@ -380,7 +416,7 @@ void xPortSysTickHandler( void ) { /* Re-enable interrupts - see comments above the cpsid instruction * above. */ - __asm( " cpsie i"); + __asm( " cpsie i" ); } else { @@ -441,9 +477,9 @@ void xPortSysTickHandler( void ) if( xModifiableIdleTime > 0 ) { - __asm( " dsb"); - __asm( " wfi"); - __asm( " isb"); + __asm( " dsb" ); + __asm( " wfi" ); + __asm( " isb" ); } configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); @@ -451,17 +487,17 @@ void xPortSysTickHandler( void ) /* Re-enable interrupts to allow the interrupt that brought the MCU * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ - __asm( " cpsie i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsie i" ); + __asm( " dsb" ); + __asm( " isb" ); /* Disable interrupts again because the clock is about to be stopped * and interrupts that execute while the clock is stopped will increase * any slippage between the time maintained by the RTOS and calendar * time. */ - __asm( " cpsid i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsid i" ); + __asm( " dsb" ); + __asm( " isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the @@ -569,7 +605,7 @@ void xPortSysTickHandler( void ) vTaskStepTick( ulCompleteTickPeriods ); /* Exit with interrupts enabled. 
*/ - __asm( " cpsie i"); + __asm( " cpsie i" ); } } diff --git a/portable/CCS/ARM_CM3/portmacro.h b/portable/CCS/ARM_CM3/portmacro.h index 5b5f4fbac07..7ac1d709caf 100644 --- a/portable/CCS/ARM_CM3/portmacro.h +++ b/portable/CCS/ARM_CM3/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c old mode 100644 new mode 100755 index 4006191705e..c43cf0ef314 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -56,8 +56,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -236,7 +237,8 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -246,7 +248,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -258,22 +260,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -282,7 +318,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -293,7 +329,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ @@ -395,9 +431,9 @@ void xPortSysTickHandler( void ) /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ - __asm( " cpsid i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsid i" ); + __asm( " dsb" ); + __asm( " isb" ); /* If a context switch is pending or a task is waiting for the scheduler * to be unsuspended then abandon the low power entry. */ @@ -405,7 +441,7 @@ void xPortSysTickHandler( void ) { /* Re-enable interrupts - see comments above the cpsid instruction * above. */ - __asm( " cpsie i"); + __asm( " cpsie i" ); } else { @@ -466,9 +502,9 @@ void xPortSysTickHandler( void ) if( xModifiableIdleTime > 0 ) { - __asm( " dsb"); - __asm( " wfi"); - __asm( " isb"); + __asm( " dsb" ); + __asm( " wfi" ); + __asm( " isb" ); } configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); @@ -476,17 +512,17 @@ void xPortSysTickHandler( void ) /* Re-enable interrupts to allow the interrupt that brought the MCU * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. 
*/ - __asm( " cpsie i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsie i" ); + __asm( " dsb" ); + __asm( " isb" ); /* Disable interrupts again because the clock is about to be stopped * and interrupts that execute while the clock is stopped will increase * any slippage between the time maintained by the RTOS and calendar * time. */ - __asm( " cpsid i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsid i" ); + __asm( " dsb" ); + __asm( " isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the @@ -594,7 +630,7 @@ void xPortSysTickHandler( void ) vTaskStepTick( ulCompleteTickPeriods ); /* Exit with interrupts enabled. */ - __asm( " cpsie i"); + __asm( " cpsie i" ); } } diff --git a/portable/CCS/ARM_CM4F/portmacro.h b/portable/CCS/ARM_CM4F/portmacro.h index 9e78588ad94..34988c223f5 100644 --- a/portable/CCS/ARM_CM4F/portmacro.h +++ b/portable/CCS/ARM_CM4F/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/CCS/ARM_Cortex-R4/portmacro.h b/portable/CCS/ARM_Cortex-R4/portmacro.h index 5d9e91d66a9..07c1827cb7a 100644 --- a/portable/CCS/ARM_Cortex-R4/portmacro.h +++ b/portable/CCS/ARM_Cortex-R4/portmacro.h @@ -52,16 +52,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if (configUSE_16_BIT_TICKS == 1) +#if (configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS) typedef uint16_t TickType_t; #define portMAX_DELAY (TickType_t) 0xFFFF -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY (TickType_t) 0xFFFFFFFFF /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/CCS/MSP430X/portmacro.h b/portable/CCS/MSP430X/portmacro.h index 1d651c19410..064b0503ae4 100644 --- a/portable/CCS/MSP430X/portmacro.h +++ b/portable/CCS/MSP430X/portmacro.h @@ -62,12 +62,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index ae9de9cfe1b..2824df05dc0 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -123,6 +123,20 @@ add_library(freertos_kernel_port STATIC GCC/ARM_CM33_NTZ/non_secure/portasm.c ThirdParty/GCC/ARM_TFM/os_wrapper_freertos.c> + $<$: + GCC/ARM_CM35P/non_secure/port.c + GCC/ARM_CM35P/non_secure/portasm.c> + + $<$: + GCC/ARM_CM35P/secure/secure_context_port.c + GCC/ARM_CM35P/secure/secure_context.c + GCC/ARM_CM35P/secure/secure_heap.c + GCC/ARM_CM35P/secure/secure_init.c> + + $<$: + GCC/ARM_CM35P_NTZ/non_secure/port.c + GCC/ARM_CM35P_NTZ/non_secure/portasm.c> + # ARMv8.1-M ports for GCC $<$: GCC/ARM_CM55/non_secure/port.c @@ -397,6 +411,20 @@ add_library(freertos_kernel_port STATIC IAR/ARM_CM33_NTZ/non_secure/port.c IAR/ARM_CM33_NTZ/non_secure/portasm.s> + $<$: + IAR/ARM_CM35P/non_secure/port.c + IAR/ARM_CM35P/non_secure/portasm.s> + + $<$: + IAR/ARM_CM35P/secure/secure_context_port_asm.s + IAR/ARM_CM35P/secure/secure_context.c + IAR/ARM_CM35P/secure/secure_heap.c + IAR/ARM_CM35P/secure/secure_init.c> + + $<$: + IAR/ARM_CM35P_NTZ/non_secure/port.c + IAR/ARM_CM35P_NTZ/non_secure/portasm.s> + # ARMv8.1-M ports for IAR EWARM $<$: IAR/ARM_CM55/non_secure/port.c @@ -738,6 +766,10 @@ target_include_directories(freertos_kernel_port PUBLIC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM33_NTZ/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM33_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM35P/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM35P/secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM35P_NTZ/non_secure> + # ARMv8.1-M ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM55/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM55/secure> @@ -860,6 +892,10 @@ target_include_directories(freertos_kernel_port PUBLIC $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM33/secure> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM33_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM35P/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM35P/secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM35P_NTZ/non_secure> + # ARMv8.1-M ports for IAR EWARM $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM55/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM55/secure> diff --git a/portable/CodeWarrior/ColdFire_V1/portmacro.h b/portable/CodeWarrior/ColdFire_V1/portmacro.h index eca83debbab..edae69ab2b2 100644 --- a/portable/CodeWarrior/ColdFire_V1/portmacro.h +++ b/portable/CodeWarrior/ColdFire_V1/portmacro.h @@ -57,12 +57,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/CodeWarrior/ColdFire_V2/portmacro.h b/portable/CodeWarrior/ColdFire_V2/portmacro.h index 634d7404202..665ae4b304c 100644 --- a/portable/CodeWarrior/ColdFire_V2/portmacro.h +++ b/portable/CodeWarrior/ColdFire_V2/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/CodeWarrior/HCS12/portmacro.h b/portable/CodeWarrior/HCS12/portmacro.h index 2fd14ae4e01..d0d0a140eb4 100644 --- a/portable/CodeWarrior/HCS12/portmacro.h +++ b/portable/CodeWarrior/HCS12/portmacro.h @@ -53,12 +53,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_AT91FR40008/portmacro.h b/portable/GCC/ARM7_AT91FR40008/portmacro.h index 8a1666e1319..69ee70f078c 100644 --- a/portable/GCC/ARM7_AT91FR40008/portmacro.h +++ b/portable/GCC/ARM7_AT91FR40008/portmacro.h @@ -79,12 +79,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_AT91SAM7S/portmacro.h b/portable/GCC/ARM7_AT91SAM7S/portmacro.h index 4f514258805..1a440a68c5f 100644 --- a/portable/GCC/ARM7_AT91SAM7S/portmacro.h +++ b/portable/GCC/ARM7_AT91SAM7S/portmacro.h @@ -79,12 +79,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_LPC2000/portmacro.h b/portable/GCC/ARM7_LPC2000/portmacro.h index 84029962b58..50922065441 100644 --- a/portable/GCC/ARM7_LPC2000/portmacro.h +++ b/portable/GCC/ARM7_LPC2000/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_LPC23xx/portmacro.h b/portable/GCC/ARM7_LPC23xx/portmacro.h index c69003ca8d0..768e86dbed1 100644 --- a/portable/GCC/ARM7_LPC23xx/portmacro.h +++ b/portable/GCC/ARM7_LPC23xx/portmacro.h @@ -79,12 +79,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CA53_64_BIT/port.c b/portable/GCC/ARM_CA53_64_BIT/port.c index 3e686e33811..545bac15d86 100644 --- a/portable/GCC/ARM_CA53_64_BIT/port.c +++ b/portable/GCC/ARM_CA53_64_BIT/port.c @@ -270,14 +270,14 @@ uint32_t ulAPSR; #if( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); volatile uint8_t ucMaxPriorityValue; /* Determine how many priority bits are implemented in the GIC. Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all possible bits. */ @@ -300,7 +300,7 @@ uint32_t ulAPSR; /* Restore the clobbered interrupt priority register to its original value. 
*/ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CA9/port.c b/portable/GCC/ARM_CA9/port.c index 75e69f876a2..8c74214117c 100644 --- a/portable/GCC/ARM_CA9/port.c +++ b/portable/GCC/ARM_CA9/port.c @@ -329,14 +329,14 @@ uint32_t ulAPSR; #if( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); volatile uint8_t ucMaxPriorityValue; /* Determine how many priority bits are implemented in the GIC. Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all possible bits. */ @@ -357,7 +357,7 @@ uint32_t ulAPSR; /* Restore the clobbered interrupt priority register to its original value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index a89c8ba42d0..408162d6402 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ @@ -75,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/portasm.c b/portable/GCC/ARM_CM23/non_secure/portasm.c index 5435439c5fe..44f159af1fa 100644 --- a/portable/GCC/ARM_CM23/non_secure/portasm.c +++ b/portable/GCC/ARM_CM23/non_secure/portasm.c @@ -367,7 +367,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ " str r0, [r3] \n"/* Restore the task's xSecureContext. */ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " push {r2, r4} \n" " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. 
*/ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index f98b8f277cb..c6dad99857c 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index f98b8f277cb..c6dad99857c 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c old mode 100644 new mode 100755 index 7f650fd360d..4aa1f2425d7 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -34,13 +34,6 @@ #include "FreeRTOS.h" #include "task.h" -/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is - * defined. The value should also ensure backward compatibility. - * FreeRTOS.org versions prior to V4.4.0 did not include this definition. 
*/ -#ifndef configKERNEL_INTERRUPT_PRIORITY - #define configKERNEL_INTERRUPT_PRIORITY 255 -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) @@ -55,8 +48,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -265,13 +259,10 @@ static void prvPortStartFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -281,7 +272,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -293,22 +284,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. 
This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -317,7 +342,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -328,7 +353,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ @@ -758,4 +783,4 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); } -#endif /* configASSERT_DEFINED */ +#endif /* configASSERT_DEFINED */ \ No newline at end of file diff --git a/portable/GCC/ARM_CM3/portmacro.h b/portable/GCC/ARM_CM3/portmacro.h index a7b45a5867a..dd729f13f2c 100644 --- a/portable/GCC/ARM_CM3/portmacro.h +++ b/portable/GCC/ARM_CM3/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ @@ -75,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. 
*/ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 943c665cc43..4fe8c59147a 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 943c665cc43..4fe8c59147a 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c new file mode 100644 index 00000000000..9976daee49a --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/port.c @@ -0,0 +1,1261 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. 
+ */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. 
*/ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 94UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. 
+ * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. 
+ */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. 
*/ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. 
*/ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ + vTaskStepTick( ulCompleteTickPeriods ); + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. 
*/ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. 
*/ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. */ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. 
*/ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. 
*/ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. */ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. 
*/ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. */ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. 
*/ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/non_secure/portasm.c b/portable/GCC/ARM_CM35P/non_secure/portasm.c new file mode 100644 index 00000000000..9f9b2e68d39 --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/portasm.c @@ -0,0 +1,470 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Portasm includes. */ +#include "portasm.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ + " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r4, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #4 \n"/* r4 = 4. */ + " str r4, [r2] \n"/* Program RNR = 4. */ + " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #8 \n"/* r4 = 8. */ + " str r4, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #12 \n"/* r4 = 12. */ + " str r4, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. 
*/ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ + " ldr r5, xSecureContextConst2 \n" + " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " msr control, r3 \n"/* Set this task's CONTROL value. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r4 \n"/* Finally, branch to EXC_RETURN. */ + #else /* configENABLE_MPU */ + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n"/* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + "xSecureContextConst2: .word xSecureContext \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst2: .word 0xe000ed94 \n" + "xMAIR0Const2: .word 0xe000edc0 \n" + "xRNRConst2: .word 0xe000ed98 \n" + "xRBARConst2: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + " ite ne \n" + " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + " bx lr \n"/* Return. */ + " \n" + " .align 4 \n" + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* Read the CONTROL register. */ + " bic r0, #1 \n"/* Clear the bit 0. */ + " msr control, r0 \n"/* Write back the new CONTROL value. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vResetPrivilege( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " orr r0, #1 \n"/* r0 = r0 | 1. */ + " msr control, r0 \n"/* CONTROL = r0. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. 
*/ + " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ + " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ + " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */ + " cpsie i \n"/* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc %0 \n"/* System call to start the first task. */ + " nop \n" + " \n" + " .align 4 \n" + "xVTORConst: .word 0xe000ed08 \n" + ::"i" ( portSVC_START_SCHEDULER ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ + " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " msr basepri, r0 \n"/* basepri = ulMask. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::: "memory" + ); +} +/*-----------------------------------------------------------*/ + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " mrs r2, psp \n"/* Read PSP in r2. */ + " \n" + " cbz r0, save_ns_context \n"/* No secure context to save. */ + " push {r0-r2, r14} \n" + " bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r3} \n"/* LR is now in r3. */ + " mov lr, r3 \n"/* LR = r3. */ + " lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ + #if ( configENABLE_MPU == 1 ) + " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r3, control \n"/* r3 = CONTROL. */ + " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. 
*/ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + " b select_next_task \n" + " \n" + " save_ns_context: \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r3, control \n"/* r3 = CONTROL. */ + " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ + " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ + " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n"/* r0 = 0. */ + " msr basepri, r0 \n"/* Enable interrupts. */ + " \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r3] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ + " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ + " str r4, [r3] \n"/* Program MAIR0. */ + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #4 \n"/* r4 = 4. */ + " str r4, [r3] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. 
*/ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #8 \n"/* r4 = 8. */ + " str r4, [r3] \n"/* Program RNR = 8. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #12 \n"/* r4 = 12. */ + " str r4, [r3] \n"/* Program RNR = 12. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r3] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + #else /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. 
Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + #endif /* configENABLE_MPU */ + " \n" + " restore_ns_context: \n" + " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + "xSecureContextConst: .word xSecureContext \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst: .word 0xe000ed94 \n" + "xMAIR0Const: .word 0xe000edc0 \n" + "xRNRConst: .word 0xe000ed98 \n" + "xRBARConst: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} +/*-----------------------------------------------------------*/ + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " tst lr, #4 \n" + " ite eq \n" + " mrseq r0, msp \n" + " mrsne r0, psp \n" + " ldr r1, svchandler_address_const \n" + " bx r1 \n" + " \n" + " .align 4 \n" + "svchandler_address_const: .word vPortSVCHandler_C \n" + ); +} +/*-----------------------------------------------------------*/ + +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " svc %0 \n"/* Secure context is allocated in the supervisor call. */ + " bx lr \n"/* Return. */ + ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ + " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */ + " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */ + " it ne \n" + " svcne %0 \n"/* Secure context is freed in the supervisor call. */ + " bx lr \n"/* Return. */ + ::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/non_secure/portasm.h b/portable/GCC/ARM_CM35P/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. 
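+ *
+ *        The kernel is expected to reach this function through the
+ *        portCLEAN_UP_TCB() macro when a task is deleted. As an illustrative
+ *        C sketch of the check performed by the naked assembly implementation
+ *        (the local variable name below is an assumption, not part of this file):
+ * @code
+ * uint32_t * pulTopOfStack = ( uint32_t * ) pulTCB[ 0 ]; // The first TCB member is the task's top of stack.
+ *
+ * if( pulTopOfStack[ 0 ] != 0 )                          // The first stacked item is the task's xSecureContext.
+ * {
+ *     // Raise SVC portSVC_FREE_SECURE_CONTEXT so the secure side frees the context.
+ * }
+ * @endcode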
+ * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacro.h b/portable/GCC/ARM_CM35P/non_secure/portmacro.h new file mode 100644 index 00000000000..33bfb283461 --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/portmacro.h @@ -0,0 +1,67 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..ca7e9225c05 --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h @@ -0,0 +1,313 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. 
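+ *
+ * The #error checks near the top of this file require the application's
+ * FreeRTOSConfig.h to take an explicit decision on the FPU, MPU and TrustZone
+ * options; the tick-width setting used above is shown as well. A minimal
+ * illustrative fragment (the values are an assumption, not a recommendation):
+ * @code
+ * #define configENABLE_FPU                  1
+ * #define configENABLE_MPU                  0
+ * #define configENABLE_TRUSTZONE            1
+ * #define configTICK_TYPE_WIDTH_IN_BITS     TICK_TYPE_WIDTH_32_BITS
+ * @endcode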
+ */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. 
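+ *
+ * As an illustrative sketch only (the base address variable below is an
+ * assumption, not taken from this file), a 32-byte aligned, read/write,
+ * non-shareable, execute-never task region would compose its RBAR value as:
+ *
+ *     ulRBAR = ( ulRegionStartAddress & 0xFFFFFFE0UL ) |
+ *              portMPU_REGION_NON_SHAREABLE |
+ *              portMPU_REGION_READ_WRITE |
+ *              portMPU_REGION_EXECUTE_NEVER;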
*/ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. 
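+ *
+ * As a usage sketch of the allocate/clean-up pair (the task function and the
+ * stack size below are illustrative assumptions, not part of this port):
+ * @code
+ * void vSecureCallingTask( void * pvParameters )
+ * {
+ *     // Give this task a secure-side stack before its first secure call.
+ *     portALLOCATE_SECURE_CONTEXT( 256 );
+ *
+ *     for( ;; )
+ *     {
+ *         // Call secure-side (non-secure callable) functions here. When the
+ *         // task is later deleted, the kernel uses portCLEAN_UP_TCB() to free
+ *         // the secure context again.
+ *     }
+ * }
+ * @endcode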
+ * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM35P/secure/secure_context.c b/portable/GCC/ARM_CM35P/secure/secure_context.c new file mode 100644 index 00000000000..0730d574dd0 --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_context.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief CONTROL value for privileged tasks. + * + * Bit[0] - 0 --> Thread mode is privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_PRIVILEGED 0x02 + +/** + * @brief CONTROL value for un-privileged tasks. + * + * Bit[0] - 1 --> Thread mode is un-privileged. + * Bit[1] - 1 --> Thread mode uses PSP. 
+ */ +#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03 + +/** + * @brief Size of stack seal values in bytes. + */ +#define securecontextSTACK_SEAL_SIZE 8 + +/** + * @brief Stack seal value as recommended by ARM. + */ +#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5 + +/** + * @brief Maximum number of secure contexts. + */ +#ifndef secureconfigMAX_SECURE_CONTEXTS + #define secureconfigMAX_SECURE_CONTEXTS 8UL +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Pre-allocated array of secure contexts. + */ +SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ]; +/*-----------------------------------------------------------*/ + +/** + * @brief Get a free secure context for a task from the secure context pool (xSecureContexts). + * + * This function ensures that only one secure context is allocated for a task. + * + * @param[in] pvTaskHandle The task handle for which the secure context is allocated. + * + * @return Index of a free secure context in the xSecureContexts array. + */ +static uint32_t ulGetSecureContext( void * pvTaskHandle ); + +/** + * @brief Return the secure context to the secure context pool (xSecureContexts). + * + * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array. + */ +static void vReturnSecureContext( uint32_t ulSecureContextIndex ); + +/* These are implemented in assembly. */ +extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ); +extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ); +/*-----------------------------------------------------------*/ + +static uint32_t ulGetSecureContext( void * pvTaskHandle ) +{ + /* Start with invalid index. */ + uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) && + ( xSecureContexts[ i ].pucStackLimit == NULL ) && + ( xSecureContexts[ i ].pucStackStart == NULL ) && + ( xSecureContexts[ i ].pvTaskHandle == NULL ) && + ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = i; + } + else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle ) + { + /* A task can only have one secure context. Do not allocate a second + * context for the same task. */ + ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + break; + } + } + + return ulSecureContextIndex; +} +/*-----------------------------------------------------------*/ + +static void vReturnSecureContext( uint32_t ulSecureContextIndex ) +{ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL; + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_Init( void ) +{ + uint32_t ulIPSR, i; + static uint32_t ulSecureContextsInitialized = 0; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) ) + { + /* Ensure to initialize secure contexts only once. */ + ulSecureContextsInitialized = 1; + + /* No stack for thread mode until a task's context is loaded. 
*/
+ secureportSET_PSPLIM( securecontextNO_STACK );
+ secureportSET_PSP( securecontextNO_STACK );
+
+ /* Initialize all secure contexts. */
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ xSecureContexts[ i ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ i ].pucStackLimit = NULL;
+ xSecureContexts[ i ].pucStackStart = NULL;
+ xSecureContexts[ i ].pvTaskHandle = NULL;
+ }
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Configure thread mode to use PSP and to be unprivileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Configure thread mode to use PSP and to be privileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
+ }
+ #endif /* configENABLE_MPU */
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle )
+#else /* configENABLE_MPU */
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle )
+#endif /* configENABLE_MPU */
+{
+ uint8_t * pucStackMemory = NULL;
+ uint8_t * pucStackLimit;
+ uint32_t ulIPSR, ulSecureContextIndex;
+ SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID;
+
+ #if ( configENABLE_MPU == 1 )
+ uint32_t * pulCurrentStackPointer = NULL;
+ #endif /* configENABLE_MPU */
+
+ /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit
+ * Register (PSPLIM) value. */
+ secureportREAD_IPSR( ulIPSR );
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode.
+ * Also do nothing if a secure context is already loaded. PSPLIM is set to
+ * securecontextNO_STACK when no secure context is loaded. */
+ if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) )
+ {
+ /* Obtain a free secure context. */
+ ulSecureContextIndex = ulGetSecureContext( pvTaskHandle );
+
+ /* Were we able to get a free context? */
+ if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS )
+ {
+ /* Allocate the stack space. */
+ pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE );
+
+ if( pucStackMemory != NULL )
+ {
+ /* Since the stack grows down, the starting point will be the last
+ * location. Note that this location is next to the last
+ * allocated byte for stack (excluding the space for seal values)
+ * because the hardware decrements the stack pointer before
+ * writing i.e. if stack pointer is 0x2, a push operation will
+ * decrement the stack pointer to 0x1 and then write at 0x1. */
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize;
+
+ /* Seal the created secure process stack. */
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE;
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE;
+
+ /* The stack cannot go beyond this location. This value is
+ * programmed in the PSPLIM register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory;
+
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle;
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Store the correct CONTROL value for the task on the stack.
+ * This value is programmed in the CONTROL register on
+ * context switch. */
+ pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ pulCurrentStackPointer--;
+
+ if( ulIsTaskPrivileged )
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
+ }
+ else
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
+ }
+
+ /* Store the current stack pointer. This value is programmed in
+ * the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Current SP is set to the start of the stack. This
+ * value is programmed in the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ }
+ #endif /* configENABLE_MPU */
+
+ /* Ensure to never return 0 as a valid context handle. */
+ xSecureContextHandle = ulSecureContextIndex + 1UL;
+ }
+ }
+ }
+
+ return xSecureContextHandle;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint32_t ulIPSR, ulSecureContextIndex;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ /* Only free if a valid context handle is passed. */
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ /* Ensure that the secure context being deleted is associated with
+ * the task. */
+ if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle )
+ {
+ /* Free the stack space. */
+ vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit );
+
+ /* Return the secure context back to the free secure contexts pool. */
+ vReturnSecureContext( ulSecureContextIndex );
+ }
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that no secure context is loaded and the task is loading its
+ * own context. */
+ if( ( pucStackLimit == securecontextNO_STACK ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that the task's context is loaded and the task is saving its own
+ * context.
*/ + if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/secure/secure_context.h b/portable/GCC/ARM_CM35P/secure/secure_context.h new file mode 100644 index 00000000000..d0adbaf018f --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_context.h @@ -0,0 +1,135 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_CONTEXT_H__ +#define __SECURE_CONTEXT_H__ + +/* Standard includes. */ +#include + +/* FreeRTOS includes. */ +#include "FreeRTOSConfig.h" + +/** + * @brief PSP value when no secure context is loaded. + */ +#define securecontextNO_STACK 0x0 + +/** + * @brief Invalid context ID. + */ +#define securecontextINVALID_CONTEXT_ID 0UL +/*-----------------------------------------------------------*/ + +/** + * @brief Structure to represent a secure context. + * + * @note Since stack grows down, pucStackStart is the highest address while + * pucStackLimit is the first address of the allocated memory. + */ +typedef struct SecureContext +{ + uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */ + uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */ + uint8_t * pucStackStart; /**< First location of the stack memory. */ + void * pvTaskHandle; /**< Task handle of the task this context is associated with. */ +} SecureContext_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Opaque handle for a secure context. + */ +typedef uint32_t SecureContextHandle_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Initializes the secure context management system. + * + * PSP is set to NULL and therefore a task must allocate and load a context + * before calling any secure side function in the thread mode. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureContext_Init( void ); + +/** + * @brief Allocates a context on the secure side. 
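+ *
+ *        The returned handle is the index into the secure context pool plus
+ *        one, so securecontextINVALID_CONTEXT_ID (0) always indicates failure.
+ *        An illustrative check on the caller's side (non-MPU signature shown,
+ *        variable names are assumptions):
+ * @code
+ * SecureContextHandle_t xHandle;
+ *
+ * xHandle = SecureContext_AllocateContext( ulSecureStackSize, pvTaskHandle );
+ *
+ * if( xHandle == securecontextINVALID_CONTEXT_ID )
+ * {
+ *     // No free slot in the pool, or the secure heap allocation failed.
+ * }
+ * @endcode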
+ * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] ulSecureStackSize Size of the stack to allocate on secure side. + * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise. + * + * @return Opaque context handle if context is successfully allocated, NULL + * otherwise. + */ +#if ( configENABLE_MPU == 1 ) + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ); +#else /* configENABLE_MPU */ + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ); +#endif /* configENABLE_MPU */ + +/** + * @brief Frees the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the + * context to be freed. + */ +void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Loads the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be loaded. + */ +void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Saves the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be saved. + */ +void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +#endif /* __SECURE_CONTEXT_H__ */ diff --git a/portable/GCC/ARM_CM35P/secure/secure_context_port.c b/portable/GCC/ARM_CM35P/secure/secure_context_port.c new file mode 100644 index 00000000000..13520870bca --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_context_port.c @@ -0,0 +1,97 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure port macros. 
*/ +#include "secure_port_macros.h" + +void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) ); +void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) ); + +void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) +{ + /* pxSecureContext value is in r0. */ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r1, ipsr \n" /* r1 = IPSR. */ + " cbz r1, load_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ + " ldmia r0!, {r1, r2} \n" /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r1!, {r3} \n" /* Read CONTROL register value from task's stack. r3 = CONTROL. */ + " msr control, r3 \n" /* CONTROL = r3. */ + #endif /* configENABLE_MPU */ + " \n" + " msr psplim, r2 \n" /* PSPLIM = r2. */ + " msr psp, r1 \n" /* PSP = r1. */ + " \n" + " load_ctx_therad_mode: \n" + " bx lr \n" + " \n" + ::: "r0", "r1", "r2" + ); +} +/*-----------------------------------------------------------*/ + +void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) +{ + /* pxSecureContext value is in r0. */ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r1, ipsr \n" /* r1 = IPSR. */ + " cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ + " mrs r1, psp \n" /* r1 = PSP. */ + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */ + " vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + #if ( configENABLE_MPU == 1 ) + " mrs r2, control \n" /* r2 = CONTROL. */ + " stmdb r1!, {r2} \n" /* Store CONTROL value on the stack. */ + #endif /* configENABLE_MPU */ + " \n" + " str r1, [r0] \n" /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */ + " movs r1, %0 \n" /* r1 = securecontextNO_STACK. */ + " msr psplim, r1 \n" /* PSPLIM = securecontextNO_STACK. */ + " msr psp, r1 \n" /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */ + " \n" + " save_ctx_therad_mode: \n" + " bx lr \n" + " \n" + ::"i" ( securecontextNO_STACK ) : "r1", "memory" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/secure/secure_heap.c b/portable/GCC/ARM_CM35P/secure/secure_heap.c new file mode 100644 index 00000000000..157fdbf0eec --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_heap.c @@ -0,0 +1,454 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure context heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Total heap size. + */ +#ifndef secureconfigTOTAL_HEAP_SIZE + #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) ) +#endif + +/* No test marker by default. */ +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +/* No tracing by default. */ +#ifndef traceMALLOC + #define traceMALLOC( pvReturn, xWantedSize ) +#endif + +/* No tracing by default. */ +#ifndef traceFREE + #define traceFREE( pv, xBlockSize ) +#endif + +/* Block sizes must not get too small. */ +#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define secureheapBITS_PER_BYTE ( ( size_t ) 8 ) +/*-----------------------------------------------------------*/ + +/* Allocate the memory for the heap. */ +#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) + +/* The application writer has already defined the array used for the RTOS +* heap - probably so it can be placed in a special segment or address. */ + extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#else /* configAPPLICATION_ALLOCATED_HEAP */ + static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/** + * @brief The linked list structure. + * + * This is used to link free blocks in order of their memory address. + */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ +} BlockLink_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Called automatically to setup the required heap structures the first + * time pvPortMalloc() is called. + */ +static void prvHeapInit( void ); + +/** + * @brief Inserts a block of memory that is being freed into the correct + * position in the list of free memory blocks. + * + * The block being freed will be merged with the block in front it and/or the + * block behind it if the memory blocks are adjacent to each other. + * + * @param[in] pxBlockToInsert The block being freed. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ); +/*-----------------------------------------------------------*/ + +/** + * @brief The size of the structure placed at the beginning of each allocated + * memory block must by correctly byte aligned. + */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + +/** + * @brief Create a couple of list links to mark the start and end of the list. + */ +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; + +/** + * @brief Keeps track of the number of free bytes remaining, but says nothing + * about fragmentation. 
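+ *
+ * These counters back xPortGetFreeHeapSize() and
+ * xPortGetMinimumEverFreeHeapSize(). An illustrative secure-side check (the
+ * threshold is an assumption):
+ * @code
+ * if( xPortGetMinimumEverFreeHeapSize() < 128 )
+ * {
+ *     // The secure heap has, at some point, been close to exhaustion.
+ * }
+ * @endcode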
+ */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/** + * @brief Gets set to the top bit of an size_t type. + * + * When this bit in the xBlockSize member of an BlockLink_t structure is set + * then the block belongs to the application. When the bit is free the block is + * still part of the free heap space. + */ +static size_t xBlockAllocatedBit = 0; +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ + BlockLink_t * pxFirstFreeBlock; + uint8_t * pucAlignedHeap; + size_t uxAddress; + size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( secureportBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + * blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + * at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + * entire heap space, minus the space taken by pxEnd. */ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) +{ + BlockLink_t * pxIterator; + uint8_t * puc; + + /* Iterate through the list until a block is found that has a higher address + * than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. 
*/ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + * before and the block after, then it's pxNextFreeBlock pointer will have + * already been set, and should not be set here as that would make it point + * to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +void * pvPortMalloc( size_t xWantedSize ) +{ + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; + void * pvReturn = NULL; + + /* If this is the first call to malloc then the heap will require + * initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is set. + * The top bit of the block size member of the BlockLink_t structure is used + * to determine who owns the block - the application or the kernel, so it + * must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + * structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number of + * bytes. */ + if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) ); + secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + * one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size was + * not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + * BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + * of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + * two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + * block following the number of bytes requested. The void + * cast is used to prevent byte alignment warnings from the + * compiler. 
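+                     *
+                     * As a worked example with illustrative sizes: if the
+                     * found block has an xBlockSize of 0x200 and the aligned
+                     * xWantedSize, including the xHeapStructSize overhead, is
+                     * 0x60, the new free block starts 0x60 bytes into the
+                     * found block and is given an xBlockSize of 0x1A0, while
+                     * the block returned to the caller keeps an xBlockSize of
+                     * 0x60.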
*/ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the single + * block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned by + * the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + + #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */ + + secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void * pv ) +{ + uint8_t * puc = ( uint8_t * ) pv; + BlockLink_t * pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + * before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + secureportASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + * allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + secureportDISABLE_NON_SECURE_INTERRUPTS(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + secureportENABLE_NON_SECURE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/secure/secure_heap.h b/portable/GCC/ARM_CM35P/secure/secure_heap.h new file mode 100644 index 00000000000..c13590f86ad --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_heap.h @@ -0,0 +1,66 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_HEAP_H__ +#define __SECURE_HEAP_H__ + +/* Standard includes. */ +#include + +/** + * @brief Allocates memory from heap. + * + * @param[in] xWantedSize The size of the memory to be allocated. + * + * @return Pointer to the memory region if the allocation is successful, NULL + * otherwise. + */ +void * pvPortMalloc( size_t xWantedSize ); + +/** + * @brief Frees the previously allocated memory. + * + * @param[in] pv Pointer to the memory to be freed. + */ +void vPortFree( void * pv ); + +/** + * @brief Get the free heap size. + * + * @return Free heap size. + */ +size_t xPortGetFreeHeapSize( void ); + +/** + * @brief Get the minimum ever free heap size. + * + * @return Minimum ever free heap size. + */ +size_t xPortGetMinimumEverFreeHeapSize( void ); + +#endif /* __SECURE_HEAP_H__ */ diff --git a/portable/GCC/ARM_CM35P/secure/secure_init.c b/portable/GCC/ARM_CM35P/secure/secure_init.c new file mode 100644 index 00000000000..dc19ebc7d5e --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_init.c @@ -0,0 +1,106 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure init includes. */ +#include "secure_init.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Constants required to manipulate the SCB. + */ +#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */ +#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL ) +#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS ) +#define secureinitSCB_AIRCR_PRIS_POS ( 14UL ) +#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS ) + +/** + * @brief Constants required to manipulate the FPU. + */ +#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define secureinitFPCCR_LSPENS_POS ( 29UL ) +#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS ) +#define secureinitFPCCR_TS_POS ( 26UL ) +#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS ) + +#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */ +#define secureinitNSACR_CP10_POS ( 10UL ) +#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS ) +#define secureinitNSACR_CP11_POS ( 11UL ) +#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS ) +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) | + ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) | + ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK ); + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is + * permitted. CP11 should be programmed to the same value as CP10. */ + *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK ); + + /* LSPENS = 0 ==> LSPEN is writable fron non-secure state. This ensures + * that we can enable/disable lazy stacking in port.c file. */ + *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK ); + + /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP + * registers (S16-S31) are also pushed to stack on exception entry and + * restored on exception return. 
*/ + *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK ); + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/secure/secure_init.h b/portable/GCC/ARM_CM35P/secure/secure_init.h new file mode 100644 index 00000000000..21daeda6b89 --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_init.h @@ -0,0 +1,54 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_INIT_H__ +#define __SECURE_INIT_H__ + +/** + * @brief De-prioritizes the non-secure exceptions. + * + * This is needed to ensure that the non-secure PendSV runs at the lowest + * priority. Context switch is done in the non-secure PendSV handler. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_DePrioritizeNSExceptions( void ); + +/** + * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access. + * + * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point + * Registers are not leaked to the non-secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_EnableNSFPUAccess( void ); + +#endif /* __SECURE_INIT_H__ */ diff --git a/portable/GCC/ARM_CM35P/secure/secure_port_macros.h b/portable/GCC/ARM_CM35P/secure/secure_port_macros.h new file mode 100644 index 00000000000..304913b8dbf --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_port_macros.h @@ -0,0 +1,140 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_PORT_MACROS_H__ +#define __SECURE_PORT_MACROS_H__ + +/** + * @brief Byte alignment requirements. + */ +#define secureportBYTE_ALIGNMENT 8 +#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 ) + +/** + * @brief Macro to declare a function as non-secure callable. + */ +#if defined( __IAR_SYSTEMS_ICC__ ) + #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root +#else + #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) ) +#endif + +/** + * @brief Set the secure PRIMASK value. + */ +#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Set the non-secure PRIMASK value. + */ +#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Read the PSP value in the given variable. + */ +#define secureportREAD_PSP( pucOutCurrentStackPointer ) \ + __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) ) + +/** + * @brief Set the PSP to the given value. + */ +#define secureportSET_PSP( pucCurrentStackPointer ) \ + __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) ) + +/** + * @brief Read the PSPLIM value in the given variable. + */ +#define secureportREAD_PSPLIM( pucOutStackLimit ) \ + __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) ) + +/** + * @brief Set the PSPLIM to the given value. + */ +#define secureportSET_PSPLIM( pucStackLimit ) \ + __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) ) + +/** + * @brief Set the NonSecure MSP to the given value. + */ +#define secureportSET_MSP_NS( pucMainStackPointer ) \ + __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) ) + +/** + * @brief Set the CONTROL register to the given value. + */ +#define secureportSET_CONTROL( ulControl ) \ + __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" ) + +/** + * @brief Read the Interrupt Program Status Register (IPSR) value in the given + * variable. + */ +#define secureportREAD_IPSR( ulIPSR ) \ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) ) + +/** + * @brief PRIMASK value to enable interrupts. + */ +#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0 + +/** + * @brief PRIMASK value to disable interrupts. + */ +#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1 + +/** + * @brief Disable secure interrupts. + */ +#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Disable non-secure interrupts. + * + * This effectively disables context switches. + */ +#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Enable non-secure interrupts. 
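+ *
+ * Typically paired with secureportDISABLE_NON_SECURE_INTERRUPTS() to bracket
+ * a short secure-side critical section, as the secure heap does when updating
+ * its free list. A minimal sketch of the pattern:
+ *
+ *     secureportDISABLE_NON_SECURE_INTERRUPTS();
+ *     {
+ *         // Access data that non-secure interrupts could otherwise preempt.
+ *     }
+ *     secureportENABLE_NON_SECURE_INTERRUPTS();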
+ */ +#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL ) + +/** + * @brief Assert definition. + */ +#define secureportASSERT( x ) \ + if( ( x ) == 0 ) \ + { \ + secureportDISABLE_SECURE_INTERRUPTS(); \ + secureportDISABLE_NON_SECURE_INTERRUPTS(); \ + for( ; ; ) {; } \ + } + +#endif /* __SECURE_PORT_MACROS_H__ */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c new file mode 100644 index 00000000000..9976daee49a --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c @@ -0,0 +1,1261 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. 
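+
+/* For reference, configuration 1 above (run FreeRTOS on the Secure Side)
+ * corresponds to the following settings in the application's FreeRTOSConfig.h.
+ * The values are shown for illustration only:
+ *
+ *     #define configRUN_FREERTOS_SECURE_ONLY    1
+ *     #define configENABLE_TRUSTZONE            0
+ */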
+#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. 
*/ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 94UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. 
+ * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. 
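+ *
+ * The count is incremented by vPortEnterCritical() and decremented by
+ * vPortExitCritical(); interrupts are only re-enabled when the count returns
+ * to zero, so critical sections nest safely. A minimal sketch:
+ *
+ *     vPortEnterCritical();     // nesting 0 -> 1, interrupts masked
+ *     {
+ *         vPortEnterCritical(); // nesting 1 -> 2
+ *         {
+ *             // Protected region.
+ *         }
+ *         vPortExitCritical();  // nesting 2 -> 1, interrupts still masked
+ *     }
+ *     vPortExitCritical();      // nesting 1 -> 0, interrupts re-enabled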
+ */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. 
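+             *
+             * As a worked example with illustrative numbers: if
+             * ulTimerCountsForOneTick is 1000, ulSysTickDecrementsLeft is 400
+             * and xExpectedIdleTime is 5 ticks, the reload value calculated
+             * below is 400 + ( 1000 * 4 ) = 4400 counts - the remainder of
+             * the current tick period plus four further whole tick periods.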
*/ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. 
*/ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ + vTaskStepTick( ulCompleteTickPeriods ); + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. 
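+     *
+     * As a worked example with illustrative clock values: if
+     * configSYSTICK_CLOCK_HZ is 100000000 (100 MHz) and configTICK_RATE_HZ is
+     * 1000, then ulTimerCountsForOneTick is 100000, the SysTick reload value
+     * programmed below is 100000 - 1 = 99999, and, when tickless idle is in
+     * use, xMaximumPossibleSuppressedTicks is 0xffffff / 100000 = 167.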
*/ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. 
*/ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. */ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. 
*/ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. 
*/ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. */ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. 
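 *
 * (Illustrative sketch, not part of this patch: this function is reached
 * through the normal application start-up sequence, for example
 *
 *     int main( void )
 *     {
 *         prvSetupHardware();    // Assumed application-defined hardware init.
 *
 *         xTaskCreate( vTaskA, "A", configMINIMAL_STACK_SIZE, NULL,
 *                      tskIDLE_PRIORITY + 1, NULL );
 *
 *         vTaskStartScheduler(); // Calls xPortStartScheduler() internally.
 *
 *         // Only reached if there was not enough heap to create the idle
 *         // or timer tasks.
 *         for( ;; );
 *     }
 *
 * vTaskA and prvSetupHardware are assumed example names.)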
*/ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. */ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. 
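 *
 * (Illustrative sketch, not part of this patch: the xRegions array normally
 * originates from the application via xTaskCreateRestricted().  The buffer,
 * task function and sizes below are assumed example values, and three
 * configurable regions are assumed, i.e. configTOTAL_MPU_REGIONS == 8.
 *
 *     static StackType_t xTaskStack[ 128 ] __attribute__( ( aligned( 32 ) ) );
 *     static uint8_t ucSharedBuffer[ 128 ] __attribute__( ( aligned( 32 ) ) );
 *
 *     static const TaskParameters_t xCheckTaskParameters =
 *     {
 *         vATask, "Check", 128, NULL, tskIDLE_PRIORITY + 1, xTaskStack,
 *         {
 *             // Base address       Length                    Parameters
 *             { ucSharedBuffer,     sizeof( ucSharedBuffer ), tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER },
 *             { 0,                  0,                        0 },
 *             { 0,                  0,                        0 }
 *         }
 *     };
 *
 *     xTaskCreateRestricted( &xCheckTaskParameters, NULL );
 *
 * Each entry with a non-zero length is converted to RBAR/RLAR values by the
 * code below; zero-length entries fall through to the else clause and the
 * corresponding MPU region is invalidated.)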
*/ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c new file mode 100644 index 00000000000..a78529d04d9 --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c @@ -0,0 +1,365 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Portasm includes. */ +#include "portasm.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r3, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #4 \n"/* r3 = 4. */ + " str r3, [r2] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #8 \n"/* r3 = 8. */ + " str r3, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #12 \n"/* r3 = 12. */ + " str r3, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. 
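 *
 * (Explanatory aside: the ldmia/stmia pairs above rely on the ARMv8-M MPU
 * alias registers.  With MPU_RNR set to 4 (or 8, 12), the eight words stored
 * at MPU_RBAR program the RBAR/RLAR pairs of regions 4..7 (or 8..11, 12..15)
 * in a single store-multiple, i.e. the task's stack region plus its
 * configurable regions as laid out in xMPU_SETTINGS.)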
*/ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " msr control, r2 \n"/* Set this task's CONTROL value. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n"/* Finally, branch to EXC_RETURN. */ + #else /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst2: .word 0xe000ed94 \n" + "xMAIR0Const2: .word 0xe000edc0 \n" + "xRNRConst2: .word 0xe000ed98 \n" + "xRBARConst2: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + " ite ne \n" + " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + " bx lr \n"/* Return. */ + " \n" + " .align 4 \n" + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* Read the CONTROL register. */ + " bic r0, #1 \n"/* Clear the bit 0. */ + " msr control, r0 \n"/* Write back the new CONTROL value. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vResetPrivilege( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " orr r0, #1 \n"/* r0 = r0 | 1. */ + " msr control, r0 \n"/* CONTROL = r0. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ + " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ + " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */ + " cpsie i \n"/* Globally enable interrupts. 
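 *
 * (Explanatory aside: the svc %0 instruction a few lines below raises
 * portSVC_START_SCHEDULER, which is routed through SVC_Handler to
 * vPortSVCHandler_C(), and that in turn calls vRestoreContextOfFirstTask()
 * so the first task starts executing.)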
*/ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc %0 \n"/* System call to start the first task. */ + " nop \n" + " \n" + " .align 4 \n" + "xVTORConst: .word 0xe000ed08 \n" + ::"i" ( portSVC_START_SCHEDULER ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ + " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " msr basepri, r0 \n"/* basepri = ulMask. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::: "memory" + ); +} +/*-----------------------------------------------------------*/ + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, psp \n"/* Read PSP in r0. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r2, control \n"/* r2 = CONTROL. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ + #else /* configENABLE_MPU */ + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ + #endif /* configENABLE_MPU */ + " \n" + " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " str r0, [r1] \n"/* Save the new top of stack in TCB. */ + " \n" + " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n"/* r0 = 0. */ + " msr basepri, r0 \n"/* Enable interrupts. */ + " \n" + " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. 
*/ + " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r3, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #4 \n"/* r3 = 4. */ + " str r3, [r2] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #8 \n"/* r3 = 8. */ + " str r3, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #12 \n"/* r3 = 12. */ + " str r3, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ + #else /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + #if ( configENABLE_MPU == 1 ) + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ + #else /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ + #endif /* configENABLE_MPU */ + " msr psp, r0 \n"/* Remember the new top of stack for the task. 
*/ + " bx r3 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst: .word 0xe000ed94 \n" + "xMAIR0Const: .word 0xe000edc0 \n" + "xRNRConst: .word 0xe000ed98 \n" + "xRBARConst: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} +/*-----------------------------------------------------------*/ + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " tst lr, #4 \n" + " ite eq \n" + " mrseq r0, msp \n" + " mrsne r0, psp \n" + " ldr r1, svchandler_address_const \n" + " bx r1 \n" + " \n" + " .align 4 \n" + "svchandler_address_const: .word vPortSVCHandler_C \n" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. 
+ * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h new file mode 100644 index 00000000000..33bfb283461 --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -0,0 +1,67 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. 
+ */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..ca7e9225c05 --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,313 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. 
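 *
 * (Illustrative aside, not part of this patch: which TickType_t is selected
 * below depends on the application's FreeRTOSConfig.h, for example
 *
 *     #define configTICK_TYPE_WIDTH_IN_BITS    TICK_TYPE_WIDTH_32_BITS
 *
 * which gives a 32-bit tick counter and a portMAX_DELAY of 0xffffffffUL.)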
+ */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
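 *
 * (Illustrative aside, not part of this patch: these macros expand to plain
 * C function prototypes and definitions.  vExampleTask is an assumed name.
 *
 *     portTASK_FUNCTION_PROTO( vExampleTask, pvParameters );
 *
 *     portTASK_FUNCTION( vExampleTask, pvParameters )
 *     {
 *         for( ;; )
 *         {
 *             // Task code goes here; tasks must never return.
 *         }
 *     }
 * )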
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c old mode 100644 new mode 100755 index e0f1d170154..e33df014c0e --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -83,8 +83,9 @@ /* Constants required to access and manipulate the SysTick. */ #define portNVIC_SYSTICK_INT ( 0x00000002UL ) #define portNVIC_SYSTICK_ENABLE ( 0x00000001UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) #define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL ) /* Constants required to set up the initial stack. */ @@ -381,13 +382,10 @@ static void prvRestoreContextOfFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. 
See - * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); - #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -397,7 +395,7 @@ BaseType_t xPortStartScheduler( void ) * to ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -409,22 +407,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
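 *
 * (Illustrative aside, not part of this patch: on a device with four
 * implemented priority bits ( __NVIC_PRIO_BITS == 4 ) a typical
 * FreeRTOSConfig.h would contain, for example,
 *
 *     #define configPRIO_BITS                       4
 *     #define configMAX_SYSCALL_INTERRUPT_PRIORITY  ( 5 << ( 8 - configPRIO_BITS ) )
 *
 * i.e. 0x50, which remains non-zero after masking by the implemented priority
 * bits and therefore passes the configASSERT( ucMaxSysCallPriority ) check
 * added above.)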
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -433,7 +465,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -444,7 +476,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ @@ -918,4 +950,4 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } #endif /* configASSERT_DEFINED */ -/*-----------------------------------------------------------*/ +/*-----------------------------------------------------------*/ \ No newline at end of file diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index 693fc7b9091..25058026973 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ @@ -111,6 +113,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c old mode 100644 new mode 100755 index b946e6e9435..fd9e6dbb8f2 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -58,8 +58,9 @@ #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -250,11 +251,11 @@ static void prvTaskExitError( void ) void vPortSVCHandler( void ) { __asm volatile ( - " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */ - " ldr r1, [r3] \n"/* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ - " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. 
*/ - " ldmia r0!, {r4-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ - " msr psp, r0 \n"/* Restore the task stack pointer. */ + " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */ + " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ + " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ + " msr psp, r0 \n" /* Restore the task stack pointer. */ " isb \n" " mov r0, #0 \n" " msr basepri, r0 \n" @@ -273,17 +274,17 @@ static void prvPortStartFirstTask( void ) * would otherwise result in the unnecessary leaving of space in the SVC stack * for lazy saving of FPU registers. */ __asm volatile ( - " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n" " ldr r0, [r0] \n" - " msr msp, r0 \n"/* Set the msp back to the start of the stack. */ - " mov r0, #0 \n"/* Clear the bit that indicates the FPU is in use, see comment above. */ + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */ " msr control, r0 \n" - " cpsie i \n"/* Globally enable interrupts. */ + " cpsie i \n" /* Globally enable interrupts. */ " cpsie f \n" " dsb \n" " isb \n" - " svc 0 \n"/* System call to start first task. */ + " svc 0 \n" /* System call to start first task. */ " nop \n" " .ltorg \n" ); @@ -295,10 +296,6 @@ static void prvPortStartFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - /* This port can be used on all revisions of the Cortex-M7 core other than * the r0p1 parts. r0p1 parts should use the port from the * /source/portable/GCC/ARM_CM7/r0p1 directory. */ @@ -307,7 +304,8 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -317,7 +315,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -329,22 +327,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. 
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -353,7 +385,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -364,7 +396,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ @@ -448,15 +480,15 @@ void xPortPendSVHandler( void ) " mrs r0, psp \n" " isb \n" " \n" - " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */ + " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */ " ldr r2, [r3] \n" " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */ + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n" " \n" - " stmdb r0!, {r4-r11, r14} \n"/* Save the core registers. */ - " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */ + " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */ + " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. 
*/ " \n" " stmdb sp!, {r0, r3} \n" " mov r0, %0 \n" @@ -468,12 +500,12 @@ void xPortPendSVHandler( void ) " msr basepri, r0 \n" " ldmia sp!, {r0, r3} \n" " \n" - " ldr r1, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */ + " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */ " ldr r0, [r1] \n" " \n" - " ldmia r0!, {r4-r11, r14} \n"/* Pop the core registers. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */ " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */ + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */ " it eq \n" " vldmiaeq r0!, {s16-s31} \n" " \n" @@ -767,10 +799,10 @@ static void vPortEnableVFP( void ) { __asm volatile ( - " ldr.w r0, =0xE000ED88 \n"/* The FPU enable bits are in the CPACR. */ + " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */ " ldr r1, [r0] \n" " \n" - " orr r1, r1, #( 0xf << 20 ) \n"/* Enable CP10 and CP11 coprocessors, then save back. */ + " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. */ " str r1, [r0] \n" " bx r14 \n" " .ltorg \n" diff --git a/portable/GCC/ARM_CM4F/portmacro.h b/portable/GCC/ARM_CM4F/portmacro.h index 0ab47e05b86..a3b2b46c989 100644 --- a/portable/GCC/ARM_CM4F/portmacro.h +++ b/portable/GCC/ARM_CM4F/portmacro.h @@ -57,16 +57,21 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS ) + typedef uint64_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffffffffffULL + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ @@ -75,6 +80,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c old mode 100644 new mode 100755 index 3125f7824d4..1733fd82072 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -93,8 +93,9 @@ /* Constants required to access and manipulate the SysTick. */ #define portNVIC_SYSTICK_INT ( 0x00000002UL ) #define portNVIC_SYSTICK_ENABLE ( 0x00000001UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) #define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL ) /* Constants required to manipulate the VFP. 
*/ @@ -412,10 +413,6 @@ static void prvRestoreContextOfFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See - * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); - /* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0 * and r0p1 cores. */ #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) @@ -430,7 +427,8 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -440,7 +438,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -452,22 +450,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. 
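The nonzero check and the sub-priority check in this hunk assume that configMAX_SYSCALL_INTERRUPT_PRIORITY is supplied as a full eight bit priority byte with the implemented bits shifted into the most significant positions, leaving the least significant (sub-priority) bit clear. A minimal FreeRTOSConfig.h sketch of that encoding, assuming a hypothetical device with four implemented priority bits; the configLIBRARY_* helper names and all numeric values are illustrative only:

    /* Number of priority bits implemented by the NVIC on this hypothetical part. */
    #define configPRIO_BITS                                 4

    /* Logical priorities as used with the vendor NVIC functions (0 = highest). */
    #define configLIBRARY_LOWEST_INTERRUPT_PRIORITY         15
    #define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY    5

    /* Shift the logical values into the top bits of the eight bit priority
     * byte.  With four priority bits, 5 becomes 0x50, which is non-zero after
     * masking and has its sub-priority bit clear. */
    #define configKERNEL_INTERRUPT_PRIORITY \
        ( configLIBRARY_LOWEST_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )
    #define configMAX_SYSCALL_INTERRUPT_PRIORITY \
        ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )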
*/ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -476,7 +508,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -487,7 +519,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ @@ -1041,4 +1073,4 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } #endif /* configASSERT_DEFINED */ -/*-----------------------------------------------------------*/ +/*-----------------------------------------------------------*/ \ No newline at end of file diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index 462075854ce..df23d95381f 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -60,16 +60,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ @@ -201,6 +203,7 @@ typedef struct MPU_SETTINGS #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. 
*/ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index b654748e138..adb47d8420f 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index b654748e138..adb47d8420f 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
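The tick type is now selected with configTICK_TYPE_WIDTH_IN_BITS rather than the old configUSE_16_BIT_TICKS flag, and an unsupported width fails at compile time through the #error above. A short sketch of the application side, assuming the 32 bit width is wanted; the helper function below is hypothetical:

    /* In FreeRTOSConfig.h - make TickType_t a uint32_t. */
    #define configTICK_TYPE_WIDTH_IN_BITS    TICK_TYPE_WIDTH_32_BITS

    /* Application code stays width agnostic by working only in TickType_t and
     * converting from milliseconds with pdMS_TO_TICKS(). */
    #include "FreeRTOS.h"
    #include "task.h"

    static void prvDelayMilliseconds( uint32_t ulMilliseconds )
    {
        const TickType_t xTicks = pdMS_TO_TICKS( ulMilliseconds );

        vTaskDelay( xTicks );
    }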
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c old mode 100644 new mode 100755 index a9c69aae7fa..316dba13b8e --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -52,8 +52,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -244,11 +245,11 @@ static void prvTaskExitError( void ) void vPortSVCHandler( void ) { __asm volatile ( - " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */ - " ldr r1, [r3] \n"/* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ - " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */ - " ldmia r0!, {r4-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ - " msr psp, r0 \n"/* Restore the task stack pointer. */ + " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */ + " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ + " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ + " msr psp, r0 \n" /* Restore the task stack pointer. */ " isb \n" " mov r0, #0 \n" " msr basepri, r0 \n" @@ -267,17 +268,17 @@ static void prvPortStartFirstTask( void ) * would otherwise result in the unnecessary leaving of space in the SVC stack * for lazy saving of FPU registers. */ __asm volatile ( - " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n" " ldr r0, [r0] \n" - " msr msp, r0 \n"/* Set the msp back to the start of the stack. */ - " mov r0, #0 \n"/* Clear the bit that indicates the FPU is in use, see comment above. */ + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */ " msr control, r0 \n" - " cpsie i \n"/* Globally enable interrupts. */ + " cpsie i \n" /* Globally enable interrupts. */ " cpsie f \n" " dsb \n" " isb \n" - " svc 0 \n"/* System call to start first task. */ + " svc 0 \n" /* System call to start first task. */ " nop \n" " .ltorg \n" ); @@ -289,13 +290,10 @@ static void prvPortStartFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. 
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -305,7 +303,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -317,22 +315,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -341,7 +373,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -352,7 +384,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ @@ -436,34 +468,34 @@ void xPortPendSVHandler( void ) " mrs r0, psp \n" " isb \n" " \n" - " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */ + " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */ " ldr r2, [r3] \n" " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */ + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n" " \n" - " stmdb r0!, {r4-r11, r14} \n"/* Save the core registers. */ - " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */ + " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */ + " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */ " \n" " stmdb sp!, {r0, r3} \n" " mov r0, %0 \n" - " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + " cpsid i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ " msr basepri, r0 \n" " dsb \n" " isb \n" - " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + " cpsie i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ " bl vTaskSwitchContext \n" " mov r0, #0 \n" " msr basepri, r0 \n" " ldmia sp!, {r0, r3} \n" " \n" - " ldr r1, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */ + " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */ " ldr r0, [r1] \n" " \n" - " ldmia r0!, {r4-r11, r14} \n"/* Pop the core registers. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */ " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */ + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */ " it eq \n" " vldmiaeq r0!, {s16-s31} \n" " \n" @@ -757,10 +789,10 @@ static void vPortEnableVFP( void ) { __asm volatile ( - " ldr.w r0, =0xE000ED88 \n"/* The FPU enable bits are in the CPACR. */ + " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */ " ldr r1, [r0] \n" " \n" - " orr r1, r1, #( 0xf << 20 ) \n"/* Enable CP10 and CP11 coprocessors, then save back. */ + " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. 
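In the PendSV handler above the BASEPRI write is bracketed by cpsid i / cpsie i, the workaround for Cortex-M7 r0p1 erratum 837070 (a write that raises BASEPRI may not take effect immediately, so an interrupt that should already be masked can still be taken). A C level sketch of the same sequence, assuming GCC inline assembly on a Cortex-M target; the function name is hypothetical and this is not the port's actual interrupt mask macro:

    static inline void prvRaiseBasepriWith837070Workaround( uint32_t ulNewBasepri )
    {
        __asm volatile (
            " cpsid i            \n" /* Erratum 837070: mask interrupts around the BASEPRI write. */
            " msr basepri, %0    \n" /* Raise BASEPRI to the new mask value. */
            " dsb                \n"
            " isb                \n"
            " cpsie i            \n" /* Erratum 837070: interrupts can be re-enabled again. */
            : : "r" ( ulNewBasepri ) : "memory"
        );
    }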
*/ " str r1, [r0] \n" " bx r14 \n" " .ltorg \n" diff --git a/portable/GCC/ARM_CM7/r0p1/portmacro.h b/portable/GCC/ARM_CM7/r0p1/portmacro.h index 214dc2bffb5..897fef5f468 100644 --- a/portable/GCC/ARM_CM7/r0p1/portmacro.h +++ b/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ @@ -75,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index 830fa2c1379..fec6923394c 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index 830fa2c1379..fec6923394c 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CR5/port.c b/portable/GCC/ARM_CR5/port.c index 8a9839cde56..2cbc24dbf9d 100644 --- a/portable/GCC/ARM_CR5/port.c +++ b/portable/GCC/ARM_CR5/port.c @@ -74,25 +74,52 @@ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be greater than ( configUNIQUE_INTERRUPT_PRIORITIES / 2 ) #endif -/* Some vendor specific files default configCLEAR_TICK_INTERRUPT() in - * portmacro.h. */ +/* + * __ARM_FP is defined by the c preprocessor when FPU support is enabled, + * usually with the -mfpu= argument and -mfloat-abi=. + * + * Note: Some implementations of the c standard library may use FPU registers + * for generic memory operations (memcpy, etc). + * When setting configUSE_TASK_FPU_SUPPORT == 1, care must be taken to + * ensure that the FPU registers are not used without an FPU context. + */ +#if ( configUSE_TASK_FPU_SUPPORT == 0 ) + #ifdef __ARM_FP + #error __ARM_FP is defined, so configUSE_TASK_FPU_SUPPORT must be set to either to 1 or 2. + #endif /* __ARM_FP */ +#elif ( configUSE_TASK_FPU_SUPPORT == 1 ) || ( configUSE_TASK_FPU_SUPPORT == 2 ) + #ifndef __ARM_FP + #error __ARM_FP is not defined, so configUSE_TASK_FPU_SUPPORT must be set to 0. + #endif /* __ARM_FP */ +#endif /* configUSE_TASK_FPU_SUPPORT */ + +/* + * Some vendor specific files default configCLEAR_TICK_INTERRUPT() in + * portmacro.h. + */ #ifndef configCLEAR_TICK_INTERRUPT #define configCLEAR_TICK_INTERRUPT() #endif -/* A critical section is exited when the critical section nesting count reaches - * this value. */ +/* + * A critical section is exited when the critical section nesting count reaches + * this value. + */ #define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 ) -/* In all GICs 255 can be written to the priority mask register to unmask all - * (but the lowest) interrupt priority. */ +/* + * In all GICs 255 can be written to the priority mask register to unmask all + * (but the lowest) interrupt priority. 
+ */ #define portUNMASK_VALUE ( 0xFFUL ) -/* Tasks are not created with a floating point context, but can be given a +/* + * Tasks are not created with a floating point context, but can be given a * floating point context after they have been created. A variable is stored as * part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task * does not have an FPU context, or any other value if the task does have an FPU - * context. */ + * context. + */ #define portNO_FLOATING_POINT_CONTEXT ( ( StackType_t ) 0 ) /* Constants required to setup the initial task context. */ @@ -101,8 +128,10 @@ #define portINTERRUPT_ENABLE_BIT ( 0x80UL ) #define portTHUMB_MODE_ADDRESS ( 0x01UL ) -/* Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary - * point is zero. */ +/* + * Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary + * point is zero. + */ #define portBINARY_POINT_BITS ( ( uint8_t ) 0x03 ) /* Masks all bits in the APSR other than the mode bits. */ @@ -140,15 +169,19 @@ #define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff ) #define portBIT_0_SET ( ( uint8_t ) 0x01 ) -/* Let the user override the pre-loading of the initial LR with the address of +/* + * Let the user override the pre-loading of the initial LR with the address of * prvTaskExitError() in case is messes up unwinding of the stack in the - * debugger. */ + * debugger. + */ #ifdef configTASK_RETURN_ADDRESS #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS #else #define portTASK_RETURN_ADDRESS prvTaskExitError #endif +#if ( configUSE_TASK_FPU_SUPPORT != 0 ) + /* * The space on the stack required to hold the FPU registers. * @@ -160,7 +193,8 @@ * the size of the bank remains the same. The FPU has also a 32-bit * status register. */ -#define portFPU_REGISTER_WORDS ( ( 16 * 2 ) + 1 ) + #define portFPU_REGISTER_WORDS ( ( 16 * 2 ) + 1 ) +#endif /* configUSE_TASK_FPU_SUPPORT != 0 */ /*-----------------------------------------------------------*/ @@ -175,6 +209,8 @@ extern void vPortRestoreTaskContext( void ); */ static void prvTaskExitError( void ); +#if ( configUSE_TASK_FPU_SUPPORT != 0 ) + /* * If the application provides an implementation of vApplicationIRQHandler(), * then it will get called directly without saving the FPU registers on @@ -194,26 +230,36 @@ static void prvTaskExitError( void ); * FPU registers to be saved on interrupt entry their IRQ handler must be * called vApplicationIRQHandler(). */ -void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) __attribute__((weak) ); + void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) __attribute__( ( weak ) ); +#endif /* configUSE_TASK_FPU_SUPPORT != 0 */ /*-----------------------------------------------------------*/ -/* A variable is used to keep track of the critical section nesting. This +/* + * A variable is used to keep track of the critical section nesting. This * variable has to be stored as part of the task context and must be initialised to * a non zero value to ensure interrupts don't inadvertently become unmasked before * the scheduler starts. As it is stored as part of the task context it will - * automatically be set to 0 when the first task is started. */ + * automatically be set to 0 when the first task is started. + */ volatile uint32_t ulCriticalNesting = 9999UL; -/* Saved as part of the task context. If ulPortTaskHasFPUContext is non-zero then - * a floating point context must be saved and restored for the task. 
*/ -uint32_t ulPortTaskHasFPUContext = pdFALSE; +#if ( configUSE_TASK_FPU_SUPPORT != 0 ) + +/* + * Saved as part of the task context. If ulPortTaskHasFPUContext is non-zero then + * a floating point context must be saved and restored for the task. + */ + uint32_t ulPortTaskHasFPUContext = pdFALSE; +#endif /* configUSE_TASK_FPU_SUPPORT != 0 */ /* Set to 1 to pend a context switch from an ISR. */ uint32_t ulPortYieldRequired = pdFALSE; -/* Counts the interrupt nesting depth. A context switch is only performed if - * if the nesting depth is 0. */ +/* + * Counts the interrupt nesting depth. A context switch is only performed if + * if the nesting depth is 0. + */ uint32_t ulPortInterruptNesting = 0UL; /* Used in asm code. */ @@ -231,12 +277,14 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters ) { - /* Setup the initial stack of the task. The stack is set exactly as + /* + * Setup the initial stack of the task. The stack is set exactly as * expected by the portRESTORE_CONTEXT() macro. * * The fist real value on the stack is the status register, which is set for * system mode, with interrupts enabled. A few NULLs are added first to ensure - * GDB does not try decoding a non-existent return address. */ + * GDB does not try decoding a non-existent return address. + */ *pxTopOfStack = ( StackType_t ) NULL; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) NULL; @@ -285,24 +333,31 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, *pxTopOfStack = ( StackType_t ) 0x01010101; /* R1 */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack--; - /* The task will start with a critical nesting count of 0 as interrupts are - * enabled. */ + /* + * The task will start with a critical nesting count of 0 as interrupts are + * enabled. + */ + pxTopOfStack--; *pxTopOfStack = portNO_CRITICAL_NESTING; - #if( configUSE_TASK_FPU_SUPPORT == 1 ) + #if ( configUSE_TASK_FPU_SUPPORT == 1 ) { - /* The task will start without a floating point context. A task that - uses the floating point hardware must call vPortTaskUsesFPU() before - executing any floating point instructions. */ + /* + * The task will start without a floating point context. + * A task that uses the floating point hardware must call + * vPortTaskUsesFPU() before executing any floating point + * instructions. + */ pxTopOfStack--; *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT; } - #elif( configUSE_TASK_FPU_SUPPORT == 2 ) + #elif ( configUSE_TASK_FPU_SUPPORT == 2 ) { - /* The task will start with a floating point context. Leave enough - space for the registers - and ensure they are initialized to 0. */ + /* + * The task will start with a floating point context. Leave enough + * space for the registers and ensure they are initialized to 0. + */ pxTopOfStack -= portFPU_REGISTER_WORDS; memset( pxTopOfStack, 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) ); @@ -310,9 +365,9 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, *pxTopOfStack = pdTRUE; ulPortTaskHasFPUContext = pdTRUE; } - #else + #elif ( configUSE_TASK_FPU_SUPPORT != 0 ) { - #error Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 1, 2, or left undefined. + #error Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 0, 1, or 2. 
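With configUSE_TASK_FPU_SUPPORT set to 1 a task starts without a floating point context, so it has to register for one before its first floating point instruction, as the comments above describe. A minimal sketch of such a task; the task name and the work it does are hypothetical:

    #include "FreeRTOS.h"
    #include "task.h"

    static void prvFilterTask( void * pvParameters )
    {
        float fAccumulator;

        ( void ) pvParameters;

        /* Give this task an FPU context before any floating point instruction
         * executes (only required when configUSE_TASK_FPU_SUPPORT == 1; with a
         * setting of 2 every task already has one). */
        vPortTaskUsesFPU();

        fAccumulator = 0.0f;

        for( ; ; )
        {
            fAccumulator += 1.0f;
            vTaskDelay( pdMS_TO_TICKS( 10 ) );
        }
    }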
} #endif /* configUSE_TASK_FPU_SUPPORT */ @@ -322,12 +377,14 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, static void prvTaskExitError( void ) { - /* A function that implements a task must not exit or attempt to return to + /* + * A function that implements a task must not exit or attempt to return to * its caller as there is nothing to return to. If a task wants to exit it * should instead call vTaskDelete( NULL ). * * Artificially force an assert() to be triggered if configASSERT() is - * defined, then stop here so application writers can catch the error. */ + * defined, then stop here so application writers can catch the error. + */ configASSERT( ulPortInterruptNesting == ~0UL ); portDISABLE_INTERRUPTS(); @@ -337,11 +394,15 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ -void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) -{ - ( void ) ulICCIAR; - configASSERT( ( volatile void * ) NULL ); -} +#if ( configUSE_TASK_FPU_SUPPORT != 0 ) + + void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) /* __attribute__( ( weak ) ) */ + { + ( void ) ulICCIAR; + configASSERT( ( volatile void * ) NULL ); + } + +#endif /* configUSE_TASK_FPU_SUPPORT != 0 */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) @@ -349,67 +410,82 @@ BaseType_t xPortStartScheduler( void ) uint32_t ulAPSR, ulCycles = 8; /* 8 bits per byte. */ #if ( configASSERT_DEFINED == 1 ) + { + volatile uint8_t ucOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); + volatile uint8_t ucMaxPriorityValue; + + /* + * Determine how many priority bits are implemented in the GIC. + * Save the interrupt priority value that is about to be clobbered. + */ + ucOriginalPriority = *pucFirstUserPriorityRegister; + + /* + * Determine the number of priority bits available. First write to + * all possible bits. + */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; + + /* Shift to the least significant bits. */ + while( ( ucMaxPriorityValue & portBIT_0_SET ) != portBIT_0_SET ) { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine how many priority bits are implemented in the GIC. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; - - /* Determine the number of priority bits available. First write to - * all possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + ucMaxPriorityValue >>= ( uint8_t ) 0x01; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* + * If ulCycles reaches 0 then ucMaxPriorityValue must have been + * read as 0, indicating a misconfiguration. + */ + ulCycles--; - /* Shift to the least significant bits. */ - while( ( ucMaxPriorityValue & portBIT_0_SET ) != portBIT_0_SET ) + if( ulCycles == 0 ) { - ucMaxPriorityValue >>= ( uint8_t ) 0x01; - - /* If ulCycles reaches 0 then ucMaxPriorityValue must have been - * read as 0, indicating a misconfiguration. 
*/ - ulCycles--; - - if( ulCycles == 0 ) - { - break; - } + break; } - - /* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read - * value. */ - configASSERT( ucMaxPriorityValue == portLOWEST_INTERRUPT_PRIORITY ); - - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; } + + /* + * Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read + * value. + */ + configASSERT( ucMaxPriorityValue == portLOWEST_INTERRUPT_PRIORITY ); + + /* + * Restore the clobbered interrupt priority register to its original + * value. + */ + *pucFirstUserPriorityRegister = ucOriginalPriority; + } #endif /* configASSERT_DEFINED */ - /* Only continue if the CPU is not in User mode. The CPU must be in a - * Privileged mode for the scheduler to start. */ + /* + * Only continue if the CPU is not in User mode. The CPU must be in a + * Privileged mode for the scheduler to start. + */ __asm volatile ( "MRS %0, APSR" : "=r" ( ulAPSR )::"memory" ); ulAPSR &= portAPSR_MODE_BITS_MASK; configASSERT( ulAPSR != portAPSR_USER_MODE ); if( ulAPSR != portAPSR_USER_MODE ) { - /* Only continue if the binary point value is set to its lowest possible + /* + * Only continue if the binary point value is set to its lowest possible * setting. See the comments in vPortValidateInterruptPriority() below for - * more information. */ + * more information. + */ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE ); if( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE ) { - /* Interrupts are turned off in the CPU itself to ensure tick does + /* + * Interrupts are turned off in the CPU itself to ensure tick does * not execute while the scheduler is being started. Interrupts are * automatically turned back on in the CPU when the first task starts - * executing. */ + * executing. + */ portCPU_IRQ_DISABLE(); /* Start the timer that generates the tick ISR. */ @@ -420,20 +496,25 @@ BaseType_t xPortStartScheduler( void ) } } - /* Will only get here if vTaskStartScheduler() was called with the CPU in + /* + * Will only get here if vTaskStartScheduler() was called with the CPU in * a non-privileged mode or the binary point register was not set to its lowest * possible value. prvTaskExitError() is referenced to prevent a compiler * warning about it being defined but not referenced in the case that the user - * defines their own exit address. */ + * defines their own exit address. + */ ( void ) prvTaskExitError; + return 0; } /*-----------------------------------------------------------*/ void vPortEndScheduler( void ) { - /* Not implemented in ports where there is nothing to return to. - * Artificially force an assert. */ + /* + * Not implemented in ports where there is nothing to return to. + * Artificially force an assert. + */ configASSERT( ulCriticalNesting == 1000UL ); } /*-----------------------------------------------------------*/ @@ -443,16 +524,20 @@ void vPortEnterCritical( void ) /* Mask interrupts up to the max syscall interrupt priority. */ ulPortSetInterruptMask(); - /* Now interrupts are disabled ulCriticalNesting can be accessed + /* + * Now interrupts are disabled ulCriticalNesting can be accessed * directly. Increment ulCriticalNesting to keep a count of how many times - * portENTER_CRITICAL() has been called. */ + * portENTER_CRITICAL() has been called. 
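Because the port keeps a nesting count, portENTER_CRITICAL()/portEXIT_CRITICAL() pairs may be nested and interrupts are only unmasked again when the outermost exit brings the count back to zero. A short usage sketch from task context; the shared counter is hypothetical:

    #include "FreeRTOS.h"
    #include "task.h"

    static volatile uint32_t ulSharedCounter = 0;

    void vIncrementSharedCounter( void )
    {
        /* Masks interrupts up to the maximum API call interrupt priority and
         * increments the nesting count; a nested call simply increments the
         * count again. */
        taskENTER_CRITICAL();
        {
            ulSharedCounter++;
        }
        taskEXIT_CRITICAL();
    }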
+ */ ulCriticalNesting++; - /* This is not the interrupt safe version of the enter critical function so + /* + * This is not the interrupt safe version of the enter critical function so * assert() if it is being called from an interrupt context. Only API * functions that end in "FromISR" can be used in an interrupt. Only assert if * the critical nesting count is 1 to protect against recursive calls if the - * assert function also uses a critical section. */ + * assert function also uses a critical section. + */ if( ulCriticalNesting == 1 ) { configASSERT( ulPortInterruptNesting == 0 ); @@ -464,16 +549,19 @@ void vPortExitCritical( void ) { if( ulCriticalNesting > portNO_CRITICAL_NESTING ) { - /* Decrement the nesting count as the critical section is being - * exited. */ + /* Decrement the nesting count as the critical section is being exited. */ ulCriticalNesting--; - /* If the nesting level has reached zero then all interrupt - * priorities must be re-enabled. */ + /* + * If the nesting level has reached zero then all interrupt + * priorities must be re-enabled. + */ if( ulCriticalNesting == portNO_CRITICAL_NESTING ) { - /* Critical nesting has reached zero so all interrupt priorities - * should be unmasked. */ + /* + * Critical nesting has reached zero so all interrupt priorities + * should be unmasked. + */ portCLEAR_INTERRUPT_MASK(); } } @@ -482,11 +570,13 @@ void vPortExitCritical( void ) void FreeRTOS_Tick_Handler( void ) { - /* Set interrupt mask before altering scheduler structures. The tick + /* + * Set interrupt mask before altering scheduler structures. The tick * handler runs at the lowest priority, so interrupts cannot already be masked, * so there is no need to save and restore the current mask value. It is * necessary to turn off interrupts in the CPU itself while the ICCPMR is being - * updated. */ + * updated. + */ portCPU_IRQ_DISABLE(); portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ); __asm volatile ( "dsb \n" @@ -505,21 +595,23 @@ void FreeRTOS_Tick_Handler( void ) } /*-----------------------------------------------------------*/ -#if( configUSE_TASK_FPU_SUPPORT != 2 ) +#if ( configUSE_TASK_FPU_SUPPORT == 1 ) void vPortTaskUsesFPU( void ) { uint32_t ulInitialFPSCR = 0; - /* A task is registering the fact that it needs an FPU context. Set the - * FPU flag (which is saved as part of the task context). */ + /* + * A task is registering the fact that it needs an FPU context. Set the + * FPU flag (which is saved as part of the task context). + */ ulPortTaskHasFPUContext = pdTRUE; /* Initialise the floating point status register. */ __asm volatile ( "FMXR FPSCR, %0" ::"r" ( ulInitialFPSCR ) : "memory" ); } -#endif /* configUSE_TASK_FPU_SUPPORT */ +#endif /* configUSE_TASK_FPU_SUPPORT == 1 */ /*-----------------------------------------------------------*/ void vPortClearInterruptMask( uint32_t ulNewMaskValue ) @@ -535,8 +627,7 @@ uint32_t ulPortSetInterruptMask( void ) { uint32_t ulReturn; - /* Interrupt in the CPU must be turned off while the ICCPMR is being - * updated. */ + /* Interrupts must be masked while ICCPMR is updated. 
*/ portCPU_IRQ_DISABLE(); if( portICCPMR_PRIORITY_MASK_REGISTER == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) ) @@ -562,7 +653,8 @@ uint32_t ulPortSetInterruptMask( void ) void vPortValidateInterruptPriority( void ) { - /* The following assertion will fail if a service routine (ISR) for + /* + * The following assertion will fail if a service routine (ISR) for * an interrupt that has been assigned a priority above * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API * function. ISR safe FreeRTOS API functions must *only* be called @@ -575,11 +667,13 @@ uint32_t ulPortSetInterruptMask( void ) * configMAX_SYSCALL_INTERRUPT_PRIORITY. * * FreeRTOS maintains separate thread and ISR API functions to ensure - * interrupt entry is as fast and simple as possible. */ + * interrupt entry is as fast and simple as possible. + */ configASSERT( portICCRPR_RUNNING_PRIORITY_REGISTER >= ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) ); - /* Priority grouping: The interrupt controller (GIC) allows the bits + /* + * Priority grouping: The interrupt controller (GIC) allows the bits * that define each interrupt's priority to be split between bits that * define the interrupt's pre-emption priority bits and bits that define * the interrupt's sub-priority. For simplicity all bits must be defined @@ -588,7 +682,8 @@ uint32_t ulPortSetInterruptMask( void ) * * The priority grouping is configured by the GIC's binary point register * (ICCBPR). Writing 0 to ICCBPR will ensure it is set to its lowest - * possible value (which may be above 0). */ + * possible value (which may be above 0). + */ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE ); } diff --git a/portable/GCC/ARM_CR5/portASM.S b/portable/GCC/ARM_CR5/portASM.S index c44ea6b7913..c331057d610 100644 --- a/portable/GCC/ARM_CR5/portASM.S +++ b/portable/GCC/ARM_CR5/portASM.S @@ -45,7 +45,10 @@ .extern vTaskSwitchContext .extern vApplicationIRQHandler .extern ulPortInterruptNesting + +#if defined( __ARM_FP ) .extern ulPortTaskHasFPUContext +#endif /* __ARM_FP */ .global FreeRTOS_IRQ_Handler .global FreeRTOS_SWI_Handler @@ -64,20 +67,21 @@ LDR R1, [R2] PUSH {R1} - /* Does the task have a floating point context that needs saving? If - ulPortTaskHasFPUContext is 0 then no. */ - LDR R2, ulPortTaskHasFPUContextConst - LDR R3, [R2] - CMP R3, #0 + #if defined( __ARM_FP ) + /* Does the task have a floating point context that needs saving? If + ulPortTaskHasFPUContext is 0 then no. */ + LDR R2, ulPortTaskHasFPUContextConst + LDR R3, [R2] + CMP R3, #0 - /* Save the floating point context, if any. */ - FMRXNE R1, FPSCR - VPUSHNE {D0-D15} - /*VPUSHNE {D16-D31}*/ - PUSHNE {R1} + /* Save the floating point context, if any. */ + FMRXNE R1, FPSCR + VPUSHNE {D0-D15} + PUSHNE {R1} - /* Save ulPortTaskHasFPUContext itself. */ - PUSH {R3} + /* Save ulPortTaskHasFPUContext itself. */ + PUSH {R3} + #endif /* __ARM_FP */ /* Save the stack pointer in the TCB. */ LDR R0, pxCurrentTCBConst @@ -95,18 +99,21 @@ LDR R1, [R0] LDR SP, [R1] - /* Is there a floating point context to restore? If the restored - ulPortTaskHasFPUContext is zero then no. */ - LDR R0, ulPortTaskHasFPUContextConst - POP {R1} - STR R1, [R0] - CMP R1, #0 - - /* Restore the floating point context, if any. */ - POPNE {R0} - /*VPOPNE {D16-D31}*/ - VPOPNE {D0-D15} - VMSRNE FPSCR, R0 + #if defined( __ARM_FP ) + /* + * Is there a floating point context to restore? 
If the restored + * ulPortTaskHasFPUContext is zero then no. + */ + LDR R0, ulPortTaskHasFPUContextConst + POP {R1} + STR R1, [R0] + CMP R1, #0 + + /* Restore the floating point context, if any. */ + POPNE {R0} + VPOPNE {D0-D15} + VMSRNE FPSCR, R0 + #endif /* __ARM_FP */ /* Restore the critical section nesting depth. */ LDR R0, ulCriticalNestingConst @@ -132,8 +139,6 @@ .endm - - /****************************************************************************** * SVC handler is used to start the scheduler. *****************************************************************************/ @@ -279,22 +284,25 @@ switch_before_exit: * FPU registers to be saved on interrupt entry their IRQ handler must be * called vApplicationIRQHandler(). *****************************************************************************/ - .align 4 .weak vApplicationIRQHandler .type vApplicationIRQHandler, %function vApplicationIRQHandler: + PUSH {LR} - FMRX R1, FPSCR - VPUSH {D0-D15} - PUSH {R1} - LDR r1, vApplicationFPUSafeIRQHandlerConst - BLX r1 + #if defined( __ARM_FP ) + FMRX R1, FPSCR + VPUSH {D0-D15} + PUSH {R1} - POP {R0} - VPOP {D0-D15} - VMSR FPSCR, R0 + LDR r1, vApplicationFPUSafeIRQHandlerConst + BLX r1 + + POP {R0} + VPOP {D0-D15} + VMSR FPSCR, R0 + #endif /* __ARM_FP */ POP {PC} @@ -303,11 +311,15 @@ ulICCEOIRConst: .word ulICCEOIR ulICCPMRConst: .word ulICCPMR pxCurrentTCBConst: .word pxCurrentTCB ulCriticalNestingConst: .word ulCriticalNesting -ulPortTaskHasFPUContextConst: .word ulPortTaskHasFPUContext + +#if defined( __ARM_FP ) + ulPortTaskHasFPUContextConst: .word ulPortTaskHasFPUContext + vApplicationFPUSafeIRQHandlerConst: .word vApplicationFPUSafeIRQHandler +#endif /* __ARM_FP */ + ulMaxAPIPriorityMaskConst: .word ulMaxAPIPriorityMask vTaskSwitchContextConst: .word vTaskSwitchContext vApplicationIRQHandlerConst: .word vApplicationIRQHandler ulPortInterruptNestingConst: .word ulPortInterruptNesting -vApplicationFPUSafeIRQHandlerConst: .word vApplicationFPUSafeIRQHandler .end diff --git a/portable/GCC/ARM_CR5/portmacro.h b/portable/GCC/ARM_CR5/portmacro.h index 4bd25bb048b..ff7337d1502 100644 --- a/portable/GCC/ARM_CR5/portmacro.h +++ b/portable/GCC/ARM_CR5/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -44,161 +46,175 @@ */ /* Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +typedef uint32_t TickType_t; +#define portMAX_DELAY ( TickType_t ) 0xffffffffUL /*-----------------------------------------------------------*/ /* Hardware specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 /*-----------------------------------------------------------*/ /* Task utilities. */ /* Called at the end of an ISR that can cause a context switch. */ - #define portEND_SWITCHING_ISR( xSwitchRequired ) \ - { \ - extern uint32_t ulPortYieldRequired; \ - \ - if( xSwitchRequired != pdFALSE ) \ - { \ - ulPortYieldRequired = pdTRUE; \ - } \ +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + { \ + extern uint32_t ulPortYieldRequired; \ + \ + if( xSwitchRequired != pdFALSE ) \ + { \ + ulPortYieldRequired = pdTRUE; \ + } \ } - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) - #define portYIELD() __asm volatile ( "SWI 0" ::: "memory" ); +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() __asm volatile ( "SWI 0" ::: "memory" ); /*----------------------------------------------------------- * Critical section control *----------------------------------------------------------*/ - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - extern uint32_t ulPortSetInterruptMask( void ); - extern void vPortClearInterruptMask( uint32_t ulNewMaskValue ); - extern void vPortInstallFreeRTOSVectorTable( void ); - -/* These macros do not globally disable/enable interrupts. They do mask off - * interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */ - #define portENTER_CRITICAL() vPortEnterCritical(); - #define portEXIT_CRITICAL() vPortExitCritical(); - #define portDISABLE_INTERRUPTS() ulPortSetInterruptMask() - #define portENABLE_INTERRUPTS() vPortClearInterruptMask( 0 ) - #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vPortClearInterruptMask( x ) +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +extern uint32_t ulPortSetInterruptMask( void ); +extern void vPortClearInterruptMask( uint32_t ulNewMaskValue ); +extern void vPortInstallFreeRTOSVectorTable( void ); + +/* + * These macros do not globally disable/enable interrupts. They do mask off + * interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. + */ +#define portENTER_CRITICAL() vPortEnterCritical(); +#define portEXIT_CRITICAL() vPortExitCritical(); +#define portDISABLE_INTERRUPTS() ulPortSetInterruptMask() +#define portENABLE_INTERRUPTS() vPortClearInterruptMask( 0 ) +#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vPortClearInterruptMask( x ) /*-----------------------------------------------------------*/ -/* Task function macros as described on the FreeRTOS.org WEB site. These are +/* + * Task function macros as described on the FreeRTOS.org WEB site. These are * not required for this port but included in case common demo code that uses these - * macros is used. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) - -/* Prototype of the FreeRTOS tick handler. This must be installed as the - * handler for whichever peripheral is used to generate the RTOS tick. 
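portEND_SWITCHING_ISR() (and its alias portYIELD_FROM_ISR()) latches ulPortYieldRequired so that the context switch happens when the interrupt nesting count unwinds to zero. A sketch of a peripheral handler using it; the queue, the handler name and the byte read are hypothetical, and the handler is assumed to be dispatched from vApplicationIRQHandler():

    #include "FreeRTOS.h"
    #include "queue.h"

    extern QueueHandle_t xRxQueue;      /* Created elsewhere by the application. */

    void vExampleUartRxHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;
        const uint8_t ucByte = 0x55;    /* Stand-in for a real data register read. */

        /* The FromISR variant records whether a higher priority task was
         * unblocked by the send. */
        ( void ) xQueueSendFromISR( xRxQueue, &ucByte, &xHigherPriorityTaskWoken );

        /* Pend a context switch on exit from the interrupt if one is needed. */
        portEND_SWITCHING_ISR( xHigherPriorityTaskWoken );
    }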
*/ - void FreeRTOS_Tick_Handler( void ); - -/* If configUSE_TASK_FPU_SUPPORT is set to 1 (or left undefined) then tasks are -created without an FPU context and must call vPortTaskUsesFPU() to give -themselves an FPU context before using any FPU instructions. If -configUSE_TASK_FPU_SUPPORT is set to 2 then all tasks will have an FPU context -by default. */ -#if( configUSE_TASK_FPU_SUPPORT != 2 ) + * macros is used. + */ +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) + +/* + * Prototype of the FreeRTOS tick handler. This must be installed as the + * handler for whichever peripheral is used to generate the RTOS tick. + */ +void FreeRTOS_Tick_Handler( void ); + +/* + * If configUSE_TASK_FPU_SUPPORT is set to 1, then tasks are created without an + * FPU context and must call vPortTaskUsesFPU() to allocate an FPU context + * prior to any FPU instructions. If configUSE_TASK_FPU_SUPPORT is set to 2, + * then all tasks have an FPU context allocated by default. + */ +#if ( configUSE_TASK_FPU_SUPPORT == 1 ) void vPortTaskUsesFPU( void ); -#else - /* Each task has an FPU context already, so define this function away to - nothing to prevent it being called accidentally. */ + #define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU() +#elif ( configUSE_TASK_FPU_SUPPORT == 2 ) + +/* + * Each task has an FPU context already, so define this function away to + * prevent it being called accidentally. + */ #define vPortTaskUsesFPU() + #define portTASK_USES_FLOATING_POINT() #endif /* configUSE_TASK_FPU_SUPPORT */ - #define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU() - #define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL ) - #define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL ) +#define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL ) +#define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL ) /* Architecture specific optimisations. */ - #ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION - #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 - #endif +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION + #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 +#endif - #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 +#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 /* Store/clear the ready priorities in a bit map. 
*/ - #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) - #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) + #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) + #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) /*-----------------------------------------------------------*/ - #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( uxReadyPriorities ) ) - - #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ - - #ifdef configASSERT - void vPortValidateInterruptPriority( void ); - #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() - #endif /* configASSERT */ + #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( uxReadyPriorities ) ) - #define portNOP() __asm volatile ( "NOP" ) +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ +#ifdef configASSERT + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() +#endif /* configASSERT */ - #ifdef __cplusplus - } /* extern C */ - #endif +#define portNOP() __asm volatile ( "NOP" ) -/* The number of bits to shift for an interrupt priority is dependent on the - * number of bits implemented by the interrupt controller. */ - #if configUNIQUE_INTERRUPT_PRIORITIES == 16 - #define portPRIORITY_SHIFT 4 - #define portMAX_BINARY_POINT_VALUE 3 - #elif configUNIQUE_INTERRUPT_PRIORITIES == 32 - #define portPRIORITY_SHIFT 3 - #define portMAX_BINARY_POINT_VALUE 2 - #elif configUNIQUE_INTERRUPT_PRIORITIES == 64 - #define portPRIORITY_SHIFT 2 - #define portMAX_BINARY_POINT_VALUE 1 - #elif configUNIQUE_INTERRUPT_PRIORITIES == 128 - #define portPRIORITY_SHIFT 1 - #define portMAX_BINARY_POINT_VALUE 0 - #elif configUNIQUE_INTERRUPT_PRIORITIES == 256 - #define portPRIORITY_SHIFT 0 - #define portMAX_BINARY_POINT_VALUE 0 - #else /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */ - #error Invalid configUNIQUE_INTERRUPT_PRIORITIES setting. configUNIQUE_INTERRUPT_PRIORITIES must be set to the number of unique priorities implemented by the target hardware - #endif /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */ +/* + * The number of bits to shift for an interrupt priority is dependent on the + * number of bits implemented by the interrupt controller. + */ +#if configUNIQUE_INTERRUPT_PRIORITIES == 16 + #define portPRIORITY_SHIFT 4 + #define portMAX_BINARY_POINT_VALUE 3 +#elif configUNIQUE_INTERRUPT_PRIORITIES == 32 + #define portPRIORITY_SHIFT 3 + #define portMAX_BINARY_POINT_VALUE 2 +#elif configUNIQUE_INTERRUPT_PRIORITIES == 64 + #define portPRIORITY_SHIFT 2 + #define portMAX_BINARY_POINT_VALUE 1 +#elif configUNIQUE_INTERRUPT_PRIORITIES == 128 + #define portPRIORITY_SHIFT 1 + #define portMAX_BINARY_POINT_VALUE 0 +#elif configUNIQUE_INTERRUPT_PRIORITIES == 256 + #define portPRIORITY_SHIFT 0 + #define portMAX_BINARY_POINT_VALUE 0 +#else /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */ + #error Invalid configUNIQUE_INTERRUPT_PRIORITIES setting. configUNIQUE_INTERRUPT_PRIORITIES must be set to the number of unique priorities implemented by the target hardware +#endif /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */ /* Interrupt controller access addresses. 
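
The portPRIORITY_SHIFT ladder above encodes how many of the GIC's eight priority bits the hardware actually implements, so a logical FreeRTOS priority has to be shifted up before it is written to the CPU interface registers defined just below. A small host-side illustration; the priority values are invented for the example and a 32-priority controller (shift of 3) is assumed.

    #include <stdint.h>
    #include <stdio.h>

    /* Example figures only: 32 unique priorities gives a shift of 3. */
    #define configMAX_API_CALL_INTERRUPT_PRIORITY    18
    #define portPRIORITY_SHIFT                       3

    int main( void )
    {
        /* Only the top bits of each 8-bit priority field exist in hardware,
         * so the logical priority is moved into those bits before being
         * written to, for example, the ICCPMR priority mask register. */
        uint8_t ucMaskValue = ( uint8_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );

        printf( "Logical priority %d -> hardware priority field 0x%02X\n",
                configMAX_API_CALL_INTERRUPT_PRIORITY,
                ( unsigned int ) ucMaskValue );   /* Prints 0x90. */

        return 0;
    }
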
*/ - #define portICCPMR_PRIORITY_MASK_OFFSET ( 0x04 ) - #define portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ( 0x0C ) - #define portICCEOIR_END_OF_INTERRUPT_OFFSET ( 0x10 ) - #define portICCBPR_BINARY_POINT_OFFSET ( 0x08 ) - #define portICCRPR_RUNNING_PRIORITY_OFFSET ( 0x14 ) - - #define portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET ) - #define portICCPMR_PRIORITY_MASK_REGISTER ( *( ( volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) ) ) - #define portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ) - #define portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCEOIR_END_OF_INTERRUPT_OFFSET ) - #define portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) - #define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) ) - #define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) ) - - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portICCPMR_PRIORITY_MASK_OFFSET ( 0x04 ) +#define portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ( 0x0C ) +#define portICCEOIR_END_OF_INTERRUPT_OFFSET ( 0x10 ) +#define portICCBPR_BINARY_POINT_OFFSET ( 0x08 ) +#define portICCRPR_RUNNING_PRIORITY_OFFSET ( 0x14 ) + +#define portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET ) +#define portICCPMR_PRIORITY_MASK_REGISTER ( *( ( volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) ) ) +#define portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ) +#define portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCEOIR_END_OF_INTERRUPT_OFFSET ) +#define portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) +#define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) ) +#define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) ) + +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } /* extern C */ +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ATMega323/portmacro.h b/portable/GCC/ATMega323/portmacro.h index 7afdef9075c..c22706de5d4 100644 --- a/portable/GCC/ATMega323/portmacro.h +++ b/portable/GCC/ATMega323/portmacro.h @@ -65,12 +65,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t 
TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/AVR32_UC3/portmacro.h b/portable/GCC/AVR32_UC3/portmacro.h index 2ebc711402a..86014a135b5 100644 --- a/portable/GCC/AVR32_UC3/portmacro.h +++ b/portable/GCC/AVR32_UC3/portmacro.h @@ -109,12 +109,14 @@ typedef unsigned long UBaseType_t; #define configTICK_TC_IRQ ATPASTE2(AVR32_TC_IRQ, configTICK_TC_CHANNEL) -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/CORTUS_APS3/portmacro.h b/portable/GCC/CORTUS_APS3/portmacro.h index 486db878961..86000665681 100644 --- a/portable/GCC/CORTUS_APS3/portmacro.h +++ b/portable/GCC/CORTUS_APS3/portmacro.h @@ -58,12 +58,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ColdFire_V2/portmacro.h b/portable/GCC/ColdFire_V2/portmacro.h index 8792cd96d18..9b4c1da8833 100644 --- a/portable/GCC/ColdFire_V2/portmacro.h +++ b/portable/GCC/ColdFire_V2/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
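
This hunk, and the near-identical hunks in the other portmacro.h files below, move the tick-width selection from configUSE_16_BIT_TICKS to configTICK_TYPE_WIDTH_IN_BITS and turn an unsupported width into a hard compile error instead of a silent 32-bit default. On the application side the change looks roughly like the fragment below, which is illustrative only:

    /* FreeRTOSConfig.h (illustrative): the tick width is now selected explicitly. */
    #define configTICK_TYPE_WIDTH_IN_BITS    TICK_TYPE_WIDTH_32_BITS    /* or TICK_TYPE_WIDTH_16_BITS */

Projects that still define only the legacy configUSE_16_BIT_TICKS setting are expected to keep working through a compatibility mapping in FreeRTOS.h, but that mapping is outside the hunks shown here.
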
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/H8S2329/portmacro.h b/portable/GCC/H8S2329/portmacro.h index f568853fc4e..4c87abd163c 100644 --- a/portable/GCC/H8S2329/portmacro.h +++ b/portable/GCC/H8S2329/portmacro.h @@ -57,12 +57,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/HCS12/portmacro.h b/portable/GCC/HCS12/portmacro.h index 1a458f9ae66..9202510ccc8 100644 --- a/portable/GCC/HCS12/portmacro.h +++ b/portable/GCC/HCS12/portmacro.h @@ -58,12 +58,14 @@ typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/MSP430F449/portmacro.h b/portable/GCC/MSP430F449/portmacro.h index 149de12178b..8d6827a57e9 100644 --- a/portable/GCC/MSP430F449/portmacro.h +++ b/portable/GCC/MSP430F449/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/MicroBlaze/portmacro.h b/portable/GCC/MicroBlaze/portmacro.h index 5bc52ffe26c..c646496ab3f 100644 --- a/portable/GCC/MicroBlaze/portmacro.h +++ b/portable/GCC/MicroBlaze/portmacro.h @@ -56,16 +56,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ @@ -113,6 +115,7 @@ void vTaskSwitchContext(); #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portNOP() asm volatile ( "NOP" ) +#define portMEMORY_BARRIER() asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. */ diff --git a/portable/GCC/MicroBlazeV8/portmacro.h b/portable/GCC/MicroBlazeV8/portmacro.h index 28e54017265..be9dfacc42f 100644 --- a/portable/GCC/MicroBlazeV8/portmacro.h +++ b/portable/GCC/MicroBlazeV8/portmacro.h @@ -60,16 +60,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ @@ -150,6 +152,7 @@ extern volatile uint32_t ulTaskSwitchRequested; #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portNOP() asm volatile ( "NOP" ) +#define portMEMORY_BARRIER() asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. */ diff --git a/portable/GCC/MicroBlazeV9/portmacro.h b/portable/GCC/MicroBlazeV9/portmacro.h index f41205e3b93..c26daa79fc2 100644 --- a/portable/GCC/MicroBlazeV9/portmacro.h +++ b/portable/GCC/MicroBlazeV9/portmacro.h @@ -60,16 +60,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
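
Several of the 32-bit ports above keep portTICK_TYPE_IS_ATOMIC at 1, meaning a TickType_t can be read in a single access. The sketch below mirrors the idea behind the tick-type critical-section macros that FreeRTOS.h derives from this flag; every name prefixed "example", and the stand-in tick counter, are invented so the fragment is self-contained, and the real header may differ in detail.

    #include <stdint.h>

    typedef uint32_t TickType_t;          /* Matches the 32-bit branch above. */
    #define portTICK_TYPE_IS_ATOMIC    1  /* As set by these ports. */

    /* Stand-ins for the real critical section macros. */
    #define exampleENTER_CRITICAL()
    #define exampleEXIT_CRITICAL()

    #if ( portTICK_TYPE_IS_ATOMIC == 1 )
        /* The read cannot tear, so no locking is required around it. */
        #define exampleTICK_TYPE_ENTER_CRITICAL()
        #define exampleTICK_TYPE_EXIT_CRITICAL()
    #else
        /* A wider-than-native tick could tear, so guard the read. */
        #define exampleTICK_TYPE_ENTER_CRITICAL()    exampleENTER_CRITICAL()
        #define exampleTICK_TYPE_EXIT_CRITICAL()     exampleEXIT_CRITICAL()
    #endif

    static volatile TickType_t xTickCount = 0;   /* Stands in for the kernel's counter. */

    TickType_t xExampleTickCountRead( void )
    {
        TickType_t xTicks;

        exampleTICK_TYPE_ENTER_CRITICAL();
        xTicks = xTickCount;
        exampleTICK_TYPE_EXIT_CRITICAL();

        return xTicks;
    }
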
#endif /*-----------------------------------------------------------*/ @@ -150,6 +152,7 @@ extern volatile uint32_t ulTaskSwitchRequested; #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portNOP() asm volatile ( "NOP" ) +#define portMEMORY_BARRIER() asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ #if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) diff --git a/portable/GCC/NiosII/portmacro.h b/portable/GCC/NiosII/portmacro.h index fb482ffb978..7a159925db2 100644 --- a/portable/GCC/NiosII/portmacro.h +++ b/portable/GCC/NiosII/portmacro.h @@ -58,16 +58,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/PPC405_Xilinx/portmacro.h b/portable/GCC/PPC405_Xilinx/portmacro.h index eaad8fe7b75..d7f32944c40 100644 --- a/portable/GCC/PPC405_Xilinx/portmacro.h +++ b/portable/GCC/PPC405_Xilinx/portmacro.h @@ -58,12 +58,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/PPC440_Xilinx/portmacro.h b/portable/GCC/PPC440_Xilinx/portmacro.h index eaad8fe7b75..d7f32944c40 100644 --- a/portable/GCC/PPC440_Xilinx/portmacro.h +++ b/portable/GCC/PPC440_Xilinx/portmacro.h @@ -58,12 +58,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
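
The MicroBlaze headers above also gain portMEMORY_BARRIER(), an empty asm statement with a "memory" clobber. It is a compiler-only barrier: it forces memory to be re-read and stops accesses from being reordered across it, which matters for data shared with an interrupt handler. A small standalone illustration; the flag name and the waiting function are invented.

    #include <stdint.h>

    #define portMEMORY_BARRIER()    asm volatile ( "" ::: "memory" )

    /* Set to 1 by an interrupt handler defined elsewhere; deliberately not
     * volatile here, to show what the barrier buys. */
    uint32_t ulTransferComplete = 0;

    void vExampleWaitForTransfer( void )
    {
        /* Without the barrier the compiler may hoist the load of
         * ulTransferComplete out of the loop and spin on a stale register
         * value. The "memory" clobber forces a fresh load each iteration. */
        while( ulTransferComplete == 0 )
        {
            portMEMORY_BARRIER();
        }
    }
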
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RL78/portmacro.h b/portable/GCC/RL78/portmacro.h index 4b3cc497235..8108f1c1221 100644 --- a/portable/GCC/RL78/portmacro.h +++ b/portable/GCC/RL78/portmacro.h @@ -54,12 +54,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX100/portmacro.h b/portable/GCC/RX100/portmacro.h index 842754f1f1e..863596685aa 100644 --- a/portable/GCC/RX100/portmacro.h +++ b/portable/GCC/RX100/portmacro.h @@ -63,16 +63,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX200/portmacro.h b/portable/GCC/RX200/portmacro.h index 7c3fefc253c..80fc8c8041d 100644 --- a/portable/GCC/RX200/portmacro.h +++ b/portable/GCC/RX200/portmacro.h @@ -64,16 +64,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX600/portmacro.h b/portable/GCC/RX600/portmacro.h index 5919edfe081..7e1fd1e0c92 100644 --- a/portable/GCC/RX600/portmacro.h +++ b/portable/GCC/RX600/portmacro.h @@ -64,16 +64,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX600v2/portmacro.h b/portable/GCC/RX600v2/portmacro.h index 5919edfe081..7e1fd1e0c92 100644 --- a/portable/GCC/RX600v2/portmacro.h +++ b/portable/GCC/RX600v2/portmacro.h @@ -64,16 +64,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX700v3_DPFPU/portmacro.h b/portable/GCC/RX700v3_DPFPU/portmacro.h index 75d405fb8a1..6b76374c882 100644 --- a/portable/GCC/RX700v3_DPFPU/portmacro.h +++ b/portable/GCC/RX700v3_DPFPU/portmacro.h @@ -76,16 +76,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/STR75x/portmacro.h b/portable/GCC/STR75x/portmacro.h index b7fbe669305..85262dac4d5 100644 --- a/portable/GCC/STR75x/portmacro.h +++ b/portable/GCC/STR75x/portmacro.h @@ -57,12 +57,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/TriCore_1782/portmacro.h b/portable/GCC/TriCore_1782/portmacro.h index 4e51954afcc..734abc5949c 100644 --- a/portable/GCC/TriCore_1782/portmacro.h +++ b/portable/GCC/TriCore_1782/portmacro.h @@ -60,16 +60,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*---------------------------------------------------------------------------*/ diff --git a/portable/IAR/78K0R/portmacro.h b/portable/IAR/78K0R/portmacro.h index 673cf156c4f..e6f67cdd3b7 100644 --- a/portable/IAR/78K0R/portmacro.h +++ b/portable/IAR/78K0R/portmacro.h @@ -57,12 +57,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if (configUSE_16_BIT_TICKS==1) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef unsigned int TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM0/port.c b/portable/IAR/ARM_CM0/port.c index ad168e577ae..c55af1fd8ad 100644 --- a/portable/IAR/ARM_CM0/port.c +++ b/portable/IAR/ARM_CM0/port.c @@ -56,13 +56,6 @@ /* Constants required to set up the initial stack. */ #define portINITIAL_XPSR ( 0x01000000 ) -/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is - * defined. The value 255 should also ensure backward compatibility. - * FreeRTOS.org versions prior to V4.3.0 did not include this definition. */ -#ifndef configKERNEL_INTERRUPT_PRIORITY - #define configKERNEL_INTERRUPT_PRIORITY 0 -#endif - /* Each task maintains its own interrupt status in the critical nesting * variable. 
*/ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; diff --git a/portable/IAR/ARM_CM0/portmacro.h b/portable/IAR/ARM_CM0/portmacro.h index 00561582349..ce1aa08fc9d 100644 --- a/portable/IAR/ARM_CM0/portmacro.h +++ b/portable/IAR/ARM_CM0/portmacro.h @@ -57,16 +57,18 @@ typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
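
The ARM_CM0 hunk above, and the ARM_CM3 hunk that follows, drop the configKERNEL_INTERRUPT_PRIORITY fallback and instead pin the PendSV and SysTick priorities to the lowest possible value through portMIN_INTERRUPT_PRIORITY ( 255 ). The shift amounts reflect the layout of SHPR3, which holds the PendSV priority byte in bits 16-23 and the SysTick priority byte in bits 24-31. A small host-side check of the resulting register image, shown only as an illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define portMIN_INTERRUPT_PRIORITY    ( 255UL )
    #define portNVIC_PENDSV_PRI           ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
    #define portNVIC_SYSTICK_PRI          ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )

    int main( void )
    {
        /* OR-ing the two values gives the image the port ORs into SHPR3 when
         * the scheduler starts. */
        uint32_t ulShpr3 = portNVIC_PENDSV_PRI | portNVIC_SYSTICK_PRI;

        printf( "SHPR3 image: 0x%08X\n", ( unsigned int ) ulShpr3 );   /* 0xFFFF0000 */

        return 0;
    }
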
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c old mode 100644 new mode 100755 index cef6c247216..f1c78e46240 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -55,8 +55,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -86,13 +87,6 @@ * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) -/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is - * defined. The value 255 should also ensure backward compatibility. - * FreeRTOS.org versions prior to V4.3.0 did not include this definition. */ -#ifndef configKERNEL_INTERRUPT_PRIORITY - #define configKERNEL_INTERRUPT_PRIORITY 255 -#endif - /* Let the user override the default SysTick clock rate. If defined by the * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the * configuration register. */ @@ -214,13 +208,10 @@ static void prvTaskExitError( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -230,7 +221,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -242,22 +233,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -266,7 +291,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -277,7 +302,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/IAR/ARM_CM3/portmacro.h b/portable/IAR/ARM_CM3/portmacro.h index 3825a7c5317..c334978ea73 100644 --- a/portable/IAR/ARM_CM3/portmacro.h +++ b/portable/IAR/ARM_CM3/portmacro.h @@ -60,16 +60,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
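
To make the reworked xPortStartScheduler() probe above concrete, the sketch below re-runs the new bit-counting loop on a host, with the value read back from the first user priority register hard-coded to 0xE0, which is what a typical device with three implemented priority bits (__NVIC_PRIO_BITS == 3) would return after 0xFF is written. The figures are only an example.

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint8_t ucMaxPriorityValue = 0xE0;    /* Assumed read-back after writing 0xFF. */
        uint32_t ulImplementedPrioBits = 0;

        /* Same loop as the port: count how many of the leading bits stuck. */
        while( ( ucMaxPriorityValue & 0x80U ) == 0x80U )
        {
            ulImplementedPrioBits++;
            ucMaxPriorityValue <<= 1;
        }

        /* With three bits, a configMAX_SYSCALL_INTERRUPT_PRIORITY such as
         * ( 5 << 5 ) == 0xA0 survives the mask, so the new
         * configASSERT( ucMaxSysCallPriority ) passes, and ulMaxPRIGROUPValue
         * becomes portMAX_PRIGROUP_BITS ( 7 ) minus 3, i.e. 4. */
        printf( "Implemented priority bits : %u\n", ( unsigned int ) ulImplementedPrioBits );   /* 3 */
        printf( "Masked syscall priority   : 0x%02X\n", ( 5U << 5U ) & 0xE0U );                 /* 0xA0 */

        return 0;
    }
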
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c new file mode 100644 index 00000000000..9976daee49a --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/port.c @@ -0,0 +1,1261 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. 
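
The configuration matrix described near the top of this new port file comes down to two FreeRTOSConfig.h switches. The fragment below is illustrative only and shows the combination for running FreeRTOS entirely on the secure side; the other supported combinations are ( 0, 1 ) and ( 0, 0 ), while ( 1, 1 ) is rejected by the #error above.

    /* FreeRTOSConfig.h (illustrative): FreeRTOS boots and stays on the secure side. */
    #define configRUN_FREERTOS_SECURE_ONLY    1
    #define configENABLE_TRUSTZONE            0
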
*/ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 94UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. 
+ */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). 
+ */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Re-enable interrupts - see comments above the cpsid instruction + * above. 
*/ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. 
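
To put concrete numbers on the bookkeeping above, here is a worked example with hypothetical clocking - a 1 MHz SysTick and a 1 kHz tick, neither taken from this port's configuration:

    /* Worked example only - all values are invented for illustration. */
    static void vTicklessArithmeticExample( void )
    {
        const uint32_t ulCountsPerTick  = 1000000UL / 1000UL; /* 1 MHz / 1 kHz = 1000 counts per tick. */
        const uint32_t ulDecrementsLeft = 400UL;              /* Counts left in the current tick period. */
        const uint32_t ulExpectedIdle   = 5UL;                /* Ticks the kernel expects to stay idle. */

        /* Reload programmed before sleeping: 400 + 1000 * ( 5 - 1 ) = 4400. */
        const uint32_t ulReload = ulDecrementsLeft + ( ulCountsPerTick * ( ulExpectedIdle - 1UL ) );

        /* Case 1 - the tick interrupt ended the sleep (COUNT_FLAG set):
         * vTaskStepTick() receives 5 - 1 = 4 ticks; the pending interrupt
         * accounts for the fifth when this function returns.
         *
         * Case 2 - some other interrupt ended the sleep with 1700 counts left:
         *   completed decrements  = ( 5 * 1000 ) - 1700         = 3300
         *   complete tick periods = 3300 / 1000                 = 3   -> vTaskStepTick( 3 )
         *   reloaded fraction     = ( ( 3 + 1 ) * 1000 ) - 3300 = 700 counts. */
        ( void ) ulReload;
    }
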
*/ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ + vTaskStepTick( ulCompleteTickPeriods ); + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
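
One note on prvTaskExitError() above: it only fires if a task's implementing function returns, which the kernel does not allow. A task that has finished its work should delete itself instead - a minimal sketch (the task itself is hypothetical):

    static void prvOneShotTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* ... perform the one-off work here ... */

        /* Never return from a task function - delete the task instead.  The
         * NULL parameter means "delete the calling task". */
        vTaskDelete( NULL );
    }
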
*/ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. 
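
For readers new to the ARMv8-M MPU: each region is described by RBAR (32-byte aligned start address plus shareability, access and execute-never bits) and RLAR (last address, MAIR attribute index and enable bit). The kernel-data region programmed just below follows the same pattern as this sketch, which uses an invented 4 KB peripheral window rather than anything defined by the port:

    /* Hypothetical region: 0x40000000 .. 0x40000FFF, read/write, never
     * executable, device-memory attributes taken from MAIR0 attribute index 1.
     * Must run privileged, and the region number must not clash with the five
     * regions the kernel reserves for itself. */
    portMPU_RNR_REG  = portFIRST_CONFIGURABLE_REGION;
    portMPU_RBAR_REG = ( 0x40000000UL & portMPU_RBAR_ADDRESS_MASK ) |
                       ( portMPU_REGION_NON_SHAREABLE ) |
                       ( portMPU_REGION_READ_WRITE ) |
                       ( portMPU_REGION_EXECUTE_NEVER );
    portMPU_RLAR_REG = ( 0x40000FFFUL & portMPU_RLAR_ADDRESS_MASK ) |
                       ( portMPU_RLAR_ATTR_INDEX1 ) |
                       ( portMPU_RLAR_REGION_ENABLE );
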
*/ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
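
Stepping back briefly to vPortEnterCritical()/vPortExitCritical() above: application code reaches them through taskENTER_CRITICAL()/taskEXIT_CRITICAL(), and the ulCriticalNesting counter makes the pair safe to nest. A minimal usage sketch - the shared counter is hypothetical:

    static volatile uint32_t ulSharedCount = 0;

    void vIncrementSharedCount( void )
    {
        taskENTER_CRITICAL(); /* Masks interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
        {
            ulSharedCount++;  /* Nesting is safe - only the outermost
                               * taskEXIT_CRITICAL() re-enables interrupts. */
        }
        taskEXIT_CRITICAL();
    }
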
*/ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
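
The translation loop below consumes the generic MemoryRegion_t definitions the application supplies when creating a restricted task (via xTaskCreateRestricted() or vTaskAllocateMPURegions()). A hedged sketch of one such definition - the buffer and its size are hypothetical, and with the default eight-region MPU portNUM_CONFIGURABLE_REGIONS is 3:

    /* 32-byte aligned so the address survives the RBAR/RLAR masking below. */
    static uint8_t ucSharedBuffer[ 512 ] __attribute__( ( aligned( 32 ) ) );

    static const MemoryRegion_t xTaskRegions[ portNUM_CONFIGURABLE_REGIONS ] =
    {
        /* pvBaseAddress,  ulLengthInBytes,           ulParameters */
        { ucSharedBuffer,  sizeof( ucSharedBuffer ),  tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER },
        { NULL,            0,                         0 }, /* Unused - invalidated by the loop below. */
        { NULL,            0,                         0 }  /* Unused - invalidated by the loop below. */
    };
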
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/non_secure/portasm.h b/portable/IAR/ARM_CM35P/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. 
+ * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/IAR/ARM_CM35P/non_secure/portasm.s b/portable/IAR/ARM_CM35P/non_secure/portasm.s new file mode 100644 index 00000000000..a193cd7b80e --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/portasm.s @@ -0,0 +1,353 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. 
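
The constraint described above is easy to trip over: because this assembly file includes FreeRTOSConfig.h, anything the IAR assembler cannot parse has to be fenced off. A sketch of the guard pattern in a hypothetical FreeRTOSConfig.h (the SystemCoreClock mapping is only an example):

    /* Values referenced from assembly must be plain numbers. */
    #define configMAX_SYSCALL_INTERRUPT_PRIORITY    ( 32 )

    #ifdef __ICCARM__ /* Defined by the IAR C compiler but not by the IAR assembler. */
        #include <stdint.h>
        extern uint32_t SystemCoreClock;            /* C-only constructs stay inside the guard. */
        #define configCPU_CLOCK_HZ    ( SystemCoreClock )
    #endif
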
*/ +#include "FreeRTOSConfig.h" + + EXTERN pxCurrentTCB + EXTERN xSecureContext + EXTERN vTaskSwitchContext + EXTERN vPortSVCHandler_C + EXTERN SecureContext_SaveContext + EXTERN SecureContext_LoadContext + + PUBLIC xIsPrivileged + PUBLIC vResetPrivilege + PUBLIC vPortAllocateSecureContext + PUBLIC vRestoreContextOfFirstTask + PUBLIC vRaisePrivilege + PUBLIC vStartFirstTask + PUBLIC ulSetInterruptMask + PUBLIC vClearInterruptMask + PUBLIC PendSV_Handler + PUBLIC SVC_Handler + PUBLIC vPortFreeSecureContext +/*-----------------------------------------------------------*/ + +/*---------------- Unprivileged Functions -------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION .text:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +xIsPrivileged: + mrs r0, control /* r0 = CONTROL. */ + tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + ite ne + movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vResetPrivilege: + mrs r0, control /* r0 = CONTROL. */ + orr r0, r0, #1 /* r0 = r0 | 1. */ + msr control, r0 /* CONTROL = r0. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vPortAllocateSecureContext: + svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +/*----------------- Privileged Functions --------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION privileged_functions:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +vRestoreContextOfFirstTask: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r3, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ + ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r4, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r4, #4 /* r4 = 4. */ + str r4, [r2] /* Program RNR = 4. */ + adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. 
*/ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ + ldr r5, =xSecureContext + str r1, [r5] /* Set xSecureContext to this task's value for the same. */ + msr psplim, r2 /* Set this task's PSPLIM value. */ + msr control, r3 /* Set this task's CONTROL value. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r4 /* Finally, branch to EXC_RETURN. */ +#else /* configENABLE_MPU */ + ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + ldr r4, =xSecureContext + str r1, [r4] /* Set xSecureContext to this task's value for the same. */ + msr psplim, r2 /* Set this task's PSPLIM value. */ + movs r1, #2 /* r1 = 2. */ + msr CONTROL, r1 /* Switch to use PSP in the thread mode. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r3 /* Finally, branch to EXC_RETURN. */ +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +vRaisePrivilege: + mrs r0, control /* Read the CONTROL register. */ + bic r0, r0, #1 /* Clear the bit 0. */ + msr control, r0 /* Write back the new CONTROL value. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vStartFirstTask: + ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */ + ldr r0, [r0] /* The first entry in vector table is stack pointer. */ + msr msp, r0 /* Set the MSP back to the start of the stack. */ + cpsie i /* Globally enable interrupts. */ + cpsie f + dsb + isb + svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */ +/*-----------------------------------------------------------*/ + +ulSetInterruptMask: + mrs r0, basepri /* r0 = basepri. Return original basepri value. */ + mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vClearInterruptMask: + msr basepri, r0 /* basepri = ulMask. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + mrs r2, psp /* Read PSP in r2. */ + + cbz r0, save_ns_context /* No secure context to save. */ + push {r0-r2, r14} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r3} /* LR is now in r3. */ + mov lr, r3 /* LR = r3. */ + lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ +#if ( configENABLE_MPU == 1 ) + subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r3, control /* r3 = CONTROL. */ + mov r4, lr /* r4 = LR/EXC_RETURN. */ + stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ +#else /* configENABLE_MPU */ + subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ +#endif /* configENABLE_MPU */ + b select_next_task + + save_ns_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + adds r2, r2, #16 /* r2 = r2 + 16. */ + stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r3, control /* r3 = CONTROL. */ + mov r4, lr /* r4 = LR/EXC_RETURN. */ + subs r2, r2, #16 /* r2 = r2 - 16. */ + stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + adds r2, r2, #12 /* r2 = r2 + 12. */ + stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + subs r2, r2, #12 /* r2 = r2 - 12. */ + stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ + + #if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r3] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r3] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r4, [r1] /* r4 = *r1 i.e. 
r4 = MAIR0. */ + ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ + str r4, [r3] /* Program MAIR0. */ + ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ + movs r4, #4 /* r4 = 4. */ + str r4, [r3] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r3] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r3] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + + #if ( configENABLE_MPU == 1 ) + ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + msr control, r3 /* Restore the CONTROL register value for the task. */ + mov lr, r4 /* LR = r4. */ + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r3] /* Restore the task's xSecureContext. */ + cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + push {r2, r4} + bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r2, r4} + mov lr, r4 /* LR = r4. */ + lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr + #else /* configENABLE_MPU */ + ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + mov lr, r4 /* LR = r4. */ + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r3] /* Restore the task's xSecureContext. */ + cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + push {r2, r4} + bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r2, r4} + mov lr, r4 /* LR = r4. */ + lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr + #endif /* configENABLE_MPU */ + + restore_ns_context: + ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. 
Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr +/*-----------------------------------------------------------*/ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C +/*-----------------------------------------------------------*/ + +vPortFreeSecureContext: + /* r0 = uint32_t *pulTCB. */ + ldr r2, [r0] /* The first item in the TCB is the top of the stack. */ + ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */ + cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */ + it ne + svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacro.h b/portable/IAR/ARM_CM35P/non_secure/portmacro.h new file mode 100644 index 00000000000..a0efc1f9dcf --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/portmacro.h @@ -0,0 +1,78 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. 
+ */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..ca7e9225c05 --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h @@ -0,0 +1,313 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. 
+ */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM35P/secure/secure_context.c b/portable/IAR/ARM_CM35P/secure/secure_context.c new file mode 100644 index 00000000000..0730d574dd0 --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_context.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief CONTROL value for privileged tasks. + * + * Bit[0] - 0 --> Thread mode is privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_PRIVILEGED 0x02 + +/** + * @brief CONTROL value for un-privileged tasks. + * + * Bit[0] - 1 --> Thread mode is un-privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03 + +/** + * @brief Size of stack seal values in bytes. + */ +#define securecontextSTACK_SEAL_SIZE 8 + +/** + * @brief Stack seal value as recommended by ARM. + */ +#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5 + +/** + * @brief Maximum number of secure contexts. + */ +#ifndef secureconfigMAX_SECURE_CONTEXTS + #define secureconfigMAX_SECURE_CONTEXTS 8UL +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Pre-allocated array of secure contexts. + */ +SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ]; +/*-----------------------------------------------------------*/ + +/** + * @brief Get a free secure context for a task from the secure context pool (xSecureContexts). + * + * This function ensures that only one secure context is allocated for a task. + * + * @param[in] pvTaskHandle The task handle for which the secure context is allocated. + * + * @return Index of a free secure context in the xSecureContexts array. + */ +static uint32_t ulGetSecureContext( void * pvTaskHandle ); + +/** + * @brief Return the secure context to the secure context pool (xSecureContexts). + * + * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array. + */ +static void vReturnSecureContext( uint32_t ulSecureContextIndex ); + +/* These are implemented in assembly. */ +extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ); +extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ); +/*-----------------------------------------------------------*/ + +static uint32_t ulGetSecureContext( void * pvTaskHandle ) +{ + /* Start with invalid index. */ + uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) && + ( xSecureContexts[ i ].pucStackLimit == NULL ) && + ( xSecureContexts[ i ].pucStackStart == NULL ) && + ( xSecureContexts[ i ].pvTaskHandle == NULL ) && + ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = i; + } + else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle ) + { + /* A task can only have one secure context. Do not allocate a second + * context for the same task. 
 */
+            ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+            break;
+        }
+    }
+
+    return ulSecureContextIndex;
+}
+/*-----------------------------------------------------------*/
+
+static void vReturnSecureContext( uint32_t ulSecureContextIndex )
+{
+    xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL;
+    xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL;
+    xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL;
+    xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_Init( void )
+{
+    uint32_t ulIPSR, i;
+    static uint32_t ulSecureContextsInitialized = 0;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) )
+    {
+        /* Ensure that the secure contexts are initialized only once. */
+        ulSecureContextsInitialized = 1;
+
+        /* No stack for thread mode until a task's context is loaded. */
+        secureportSET_PSPLIM( securecontextNO_STACK );
+        secureportSET_PSP( securecontextNO_STACK );
+
+        /* Initialize all secure contexts. */
+        for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+        {
+            xSecureContexts[ i ].pucCurrentStackPointer = NULL;
+            xSecureContexts[ i ].pucStackLimit = NULL;
+            xSecureContexts[ i ].pucStackStart = NULL;
+            xSecureContexts[ i ].pvTaskHandle = NULL;
+        }
+
+        #if ( configENABLE_MPU == 1 )
+        {
+            /* Configure thread mode to use PSP and to be unprivileged. */
+            secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
+        }
+        #else /* configENABLE_MPU */
+        {
+            /* Configure thread mode to use PSP and to be privileged. */
+            secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
+        }
+        #endif /* configENABLE_MPU */
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+                                                                                       uint32_t ulIsTaskPrivileged,
+                                                                                       void * pvTaskHandle )
+#else /* configENABLE_MPU */
+    secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+                                                                                       void * pvTaskHandle )
+#endif /* configENABLE_MPU */
+{
+    uint8_t * pucStackMemory = NULL;
+    uint8_t * pucStackLimit;
+    uint32_t ulIPSR, ulSecureContextIndex;
+    SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID;
+
+    #if ( configENABLE_MPU == 1 )
+        uint32_t * pulCurrentStackPointer = NULL;
+    #endif /* configENABLE_MPU */
+
+    /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit
+     * Register (PSPLIM) value. */
+    secureportREAD_IPSR( ulIPSR );
+    secureportREAD_PSPLIM( pucStackLimit );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode.
+     * Also do nothing if a secure context is already loaded. PSPLIM is set to
+     * securecontextNO_STACK when no secure context is loaded. */
+    if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) )
+    {
+        /* Obtain a free secure context. */
+        ulSecureContextIndex = ulGetSecureContext( pvTaskHandle );
+
+        /* Were we able to get a free context? */
+        if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS )
+        {
+            /* Allocate the stack space.
*/ + pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE ); + + if( pucStackMemory != NULL ) + { + /* Since stack grows down, the starting point will be the last + * location. Note that this location is next to the last + * allocated byte for stack (excluding the space for seal values) + * because the hardware decrements the stack pointer before + * writing i.e. if stack pointer is 0x2, a push operation will + * decrement the stack pointer to 0x1 and then write at 0x1. */ + xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize; + + /* Seal the created secure process stack. */ + *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE; + *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE; + + /* The stack cannot go beyond this location. This value is + * programmed in the PSPLIM register on context switch.*/ + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory; + + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle; + + #if ( configENABLE_MPU == 1 ) + { + /* Store the correct CONTROL value for the task on the stack. + * This value is programmed in the CONTROL register on + * context switch. */ + pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart; + pulCurrentStackPointer--; + + if( ulIsTaskPrivileged ) + { + *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED; + } + else + { + *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED; + } + + /* Store the current stack pointer. This value is programmed in + * the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer; + } + #else /* configENABLE_MPU */ + { + /* Current SP is set to the starting of the stack. This + * value programmed in the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart; + } + #endif /* configENABLE_MPU */ + + /* Ensure to never return 0 as a valid context handle. */ + xSecureContextHandle = ulSecureContextIndex + 1UL; + } + } + } + + return xSecureContextHandle; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint32_t ulIPSR, ulSecureContextIndex; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* Only free if a valid context handle is passed. */ + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + /* Ensure that the secure context being deleted is associated with + * the task. */ + if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) + { + /* Free the stack space. */ + vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit ); + + /* Return the secure context back to the free secure contexts pool. 
*/ + vReturnSecureContext( ulSecureContextIndex ); + } + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that no secure context is loaded and the task is loading it's + * own context. */ + if( ( pucStackLimit == securecontextNO_STACK ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that task's context is loaded and the task is saving it's own + * context. */ + if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/secure/secure_context.h b/portable/IAR/ARM_CM35P/secure/secure_context.h new file mode 100644 index 00000000000..d0adbaf018f --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_context.h @@ -0,0 +1,135 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_CONTEXT_H__ +#define __SECURE_CONTEXT_H__ + +/* Standard includes. */ +#include + +/* FreeRTOS includes. */ +#include "FreeRTOSConfig.h" + +/** + * @brief PSP value when no secure context is loaded. 
+ */ +#define securecontextNO_STACK 0x0 + +/** + * @brief Invalid context ID. + */ +#define securecontextINVALID_CONTEXT_ID 0UL +/*-----------------------------------------------------------*/ + +/** + * @brief Structure to represent a secure context. + * + * @note Since stack grows down, pucStackStart is the highest address while + * pucStackLimit is the first address of the allocated memory. + */ +typedef struct SecureContext +{ + uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */ + uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */ + uint8_t * pucStackStart; /**< First location of the stack memory. */ + void * pvTaskHandle; /**< Task handle of the task this context is associated with. */ +} SecureContext_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Opaque handle for a secure context. + */ +typedef uint32_t SecureContextHandle_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Initializes the secure context management system. + * + * PSP is set to NULL and therefore a task must allocate and load a context + * before calling any secure side function in the thread mode. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureContext_Init( void ); + +/** + * @brief Allocates a context on the secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] ulSecureStackSize Size of the stack to allocate on secure side. + * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise. + * + * @return Opaque context handle if context is successfully allocated, NULL + * otherwise. + */ +#if ( configENABLE_MPU == 1 ) + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ); +#else /* configENABLE_MPU */ + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ); +#endif /* configENABLE_MPU */ + +/** + * @brief Frees the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the + * context to be freed. + */ +void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Loads the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be loaded. + */ +void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Saves the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be saved. 
+ */ +void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +#endif /* __SECURE_CONTEXT_H__ */ diff --git a/portable/IAR/ARM_CM35P/secure/secure_context_port_asm.s b/portable/IAR/ARM_CM35P/secure/secure_context_port_asm.s new file mode 100644 index 00000000000..400bd0107a3 --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_context_port_asm.s @@ -0,0 +1,86 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + SECTION .text:CODE:NOROOT(2) + THUMB + +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ +#include "FreeRTOSConfig.h" + + PUBLIC SecureContext_LoadContextAsm + PUBLIC SecureContext_SaveContextAsm +/*-----------------------------------------------------------*/ + +SecureContext_LoadContextAsm: + /* pxSecureContext value is in r0. */ + mrs r1, ipsr /* r1 = IPSR. */ + cbz r1, load_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ + ldmia r0!, {r1, r2} /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */ + +#if ( configENABLE_MPU == 1 ) + ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */ + msr control, r3 /* CONTROL = r3. */ +#endif /* configENABLE_MPU */ + + msr psplim, r2 /* PSPLIM = r2. */ + msr psp, r1 /* PSP = r1. */ + + load_ctx_therad_mode: + bx lr +/*-----------------------------------------------------------*/ + +SecureContext_SaveContextAsm: + /* pxSecureContext value is in r0. */ + mrs r1, ipsr /* r1 = IPSR. */ + cbz r1, save_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ + mrs r1, psp /* r1 = PSP. */ + +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */ + vldmia r1!, {s0} /* Nullify the effect of the previous statement. 
*/ +#endif /* configENABLE_FPU || configENABLE_MVE */ + +#if ( configENABLE_MPU == 1 ) + mrs r2, control /* r2 = CONTROL. */ + stmdb r1!, {r2} /* Store CONTROL value on the stack. */ +#endif /* configENABLE_MPU */ + + str r1, [r0] /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */ + movs r1, #0 /* r1 = securecontextNO_STACK. */ + msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */ + msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */ + + save_ctx_therad_mode: + bx lr +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM35P/secure/secure_heap.c b/portable/IAR/ARM_CM35P/secure/secure_heap.c new file mode 100644 index 00000000000..157fdbf0eec --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_heap.c @@ -0,0 +1,454 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure context heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Total heap size. + */ +#ifndef secureconfigTOTAL_HEAP_SIZE + #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) ) +#endif + +/* No test marker by default. */ +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +/* No tracing by default. */ +#ifndef traceMALLOC + #define traceMALLOC( pvReturn, xWantedSize ) +#endif + +/* No tracing by default. */ +#ifndef traceFREE + #define traceFREE( pv, xBlockSize ) +#endif + +/* Block sizes must not get too small. */ +#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define secureheapBITS_PER_BYTE ( ( size_t ) 8 ) +/*-----------------------------------------------------------*/ + +/* Allocate the memory for the heap. */ +#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) + +/* The application writer has already defined the array used for the RTOS +* heap - probably so it can be placed in a special segment or address. 
 */
+    extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#else /* configAPPLICATION_ALLOCATED_HEAP */
+    static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/**
+ * @brief The linked list structure.
+ *
+ * This is used to link free blocks in order of their memory address.
+ */
+typedef struct A_BLOCK_LINK
+{
+    struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
+    size_t xBlockSize;                     /**< The size of the free block. */
+} BlockLink_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Called automatically to set up the required heap structures the first
+ * time pvPortMalloc() is called.
+ */
+static void prvHeapInit( void );
+
+/**
+ * @brief Inserts a block of memory that is being freed into the correct
+ * position in the list of free memory blocks.
+ *
+ * The block being freed will be merged with the block in front of it and/or the
+ * block behind it if the memory blocks are adjacent to each other.
+ *
+ * @param[in] pxBlockToInsert The block being freed.
+ */
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert );
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The size of the structure placed at the beginning of each allocated
+ * memory block must be correctly byte aligned.
+ */
+static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+
+/**
+ * @brief Create a couple of list links to mark the start and end of the list.
+ */
+static BlockLink_t xStart;
+static BlockLink_t * pxEnd = NULL;
+
+/**
+ * @brief Keeps track of the number of free bytes remaining, but says nothing
+ * about fragmentation.
+ */
+static size_t xFreeBytesRemaining = 0U;
+static size_t xMinimumEverFreeBytesRemaining = 0U;
+
+/**
+ * @brief Gets set to the top bit of a size_t type.
+ *
+ * When this bit in the xBlockSize member of a BlockLink_t structure is set
+ * then the block belongs to the application. When the bit is free the block is
+ * still part of the free heap space.
+ */
+static size_t xBlockAllocatedBit = 0;
+/*-----------------------------------------------------------*/
+
+static void prvHeapInit( void )
+{
+    BlockLink_t * pxFirstFreeBlock;
+    uint8_t * pucAlignedHeap;
+    size_t uxAddress;
+    size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
+
+    /* Ensure the heap starts on a correctly aligned boundary. */
+    uxAddress = ( size_t ) ucHeap;
+
+    if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
+    {
+        uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
+        uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+        xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
+    }
+
+    pucAlignedHeap = ( uint8_t * ) uxAddress;
+
+    /* xStart is used to hold a pointer to the first item in the list of free
+     * blocks. The void cast is used to prevent compiler warnings. */
+    xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
+    xStart.xBlockSize = ( size_t ) 0;
+
+    /* pxEnd is used to mark the end of the list of free blocks and is inserted
+     * at the end of the heap space. */
+    uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
+    uxAddress -= xHeapStructSize;
+    uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+    pxEnd = ( void * ) uxAddress;
+    pxEnd->xBlockSize = 0;
+    pxEnd->pxNextFreeBlock = NULL;
+
+    /* To start with there is a single free block that is sized to take up the
+     * entire heap space, minus the space taken by pxEnd.
*/ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) +{ + BlockLink_t * pxIterator; + uint8_t * puc; + + /* Iterate through the list until a block is found that has a higher address + * than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + * before and the block after, then it's pxNextFreeBlock pointer will have + * already been set, and should not be set here as that would make it point + * to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +void * pvPortMalloc( size_t xWantedSize ) +{ + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; + void * pvReturn = NULL; + + /* If this is the first call to malloc then the heap will require + * initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is set. + * The top bit of the block size member of the BlockLink_t structure is used + * to determine who owns the block - the application or the kernel, so it + * must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + * structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number of + * bytes. 
*/ + if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) ); + secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + * one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size was + * not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + * BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + * of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + * two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + * block following the number of bytes requested. The void + * cast is used to prevent byte alignment warnings from the + * compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the single + * block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned by + * the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + + #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */ + + secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void * pv ) +{ + uint8_t * puc = ( uint8_t * ) pv; + BlockLink_t * pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + * before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. 
*/ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + secureportASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + * allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + secureportDISABLE_NON_SECURE_INTERRUPTS(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + secureportENABLE_NON_SECURE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/secure/secure_heap.h b/portable/IAR/ARM_CM35P/secure/secure_heap.h new file mode 100644 index 00000000000..c13590f86ad --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_heap.h @@ -0,0 +1,66 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_HEAP_H__ +#define __SECURE_HEAP_H__ + +/* Standard includes. */ +#include + +/** + * @brief Allocates memory from heap. + * + * @param[in] xWantedSize The size of the memory to be allocated. + * + * @return Pointer to the memory region if the allocation is successful, NULL + * otherwise. + */ +void * pvPortMalloc( size_t xWantedSize ); + +/** + * @brief Frees the previously allocated memory. + * + * @param[in] pv Pointer to the memory to be freed. + */ +void vPortFree( void * pv ); + +/** + * @brief Get the free heap size. + * + * @return Free heap size. + */ +size_t xPortGetFreeHeapSize( void ); + +/** + * @brief Get the minimum ever free heap size. + * + * @return Minimum ever free heap size. 
+ */ +size_t xPortGetMinimumEverFreeHeapSize( void ); + +#endif /* __SECURE_HEAP_H__ */ diff --git a/portable/IAR/ARM_CM35P/secure/secure_init.c b/portable/IAR/ARM_CM35P/secure/secure_init.c new file mode 100644 index 00000000000..dc19ebc7d5e --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_init.c @@ -0,0 +1,106 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure init includes. */ +#include "secure_init.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Constants required to manipulate the SCB. + */ +#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */ +#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL ) +#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS ) +#define secureinitSCB_AIRCR_PRIS_POS ( 14UL ) +#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS ) + +/** + * @brief Constants required to manipulate the FPU. + */ +#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define secureinitFPCCR_LSPENS_POS ( 29UL ) +#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS ) +#define secureinitFPCCR_TS_POS ( 26UL ) +#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS ) + +#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */ +#define secureinitNSACR_CP10_POS ( 10UL ) +#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS ) +#define secureinitNSACR_CP11_POS ( 11UL ) +#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS ) +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. 
*/ + if( ulIPSR != 0 ) + { + *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) | + ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) | + ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK ); + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is + * permitted. CP11 should be programmed to the same value as CP10. */ + *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK ); + + /* LSPENS = 0 ==> LSPEN is writable fron non-secure state. This ensures + * that we can enable/disable lazy stacking in port.c file. */ + *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK ); + + /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP + * registers (S16-S31) are also pushed to stack on exception entry and + * restored on exception return. */ + *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK ); + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/secure/secure_init.h b/portable/IAR/ARM_CM35P/secure/secure_init.h new file mode 100644 index 00000000000..21daeda6b89 --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_init.h @@ -0,0 +1,54 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_INIT_H__ +#define __SECURE_INIT_H__ + +/** + * @brief De-prioritizes the non-secure exceptions. + * + * This is needed to ensure that the non-secure PendSV runs at the lowest + * priority. Context switch is done in the non-secure PendSV handler. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_DePrioritizeNSExceptions( void ); + +/** + * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access. 
+ * + * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point + * Registers are not leaked to the non-secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_EnableNSFPUAccess( void ); + +#endif /* __SECURE_INIT_H__ */ diff --git a/portable/IAR/ARM_CM35P/secure/secure_port_macros.h b/portable/IAR/ARM_CM35P/secure/secure_port_macros.h new file mode 100644 index 00000000000..304913b8dbf --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_port_macros.h @@ -0,0 +1,140 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_PORT_MACROS_H__ +#define __SECURE_PORT_MACROS_H__ + +/** + * @brief Byte alignment requirements. + */ +#define secureportBYTE_ALIGNMENT 8 +#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 ) + +/** + * @brief Macro to declare a function as non-secure callable. + */ +#if defined( __IAR_SYSTEMS_ICC__ ) + #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root +#else + #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) ) +#endif + +/** + * @brief Set the secure PRIMASK value. + */ +#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Set the non-secure PRIMASK value. + */ +#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Read the PSP value in the given variable. + */ +#define secureportREAD_PSP( pucOutCurrentStackPointer ) \ + __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) ) + +/** + * @brief Set the PSP to the given value. + */ +#define secureportSET_PSP( pucCurrentStackPointer ) \ + __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) ) + +/** + * @brief Read the PSPLIM value in the given variable. + */ +#define secureportREAD_PSPLIM( pucOutStackLimit ) \ + __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) ) + +/** + * @brief Set the PSPLIM to the given value. 
+ */ +#define secureportSET_PSPLIM( pucStackLimit ) \ + __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) ) + +/** + * @brief Set the NonSecure MSP to the given value. + */ +#define secureportSET_MSP_NS( pucMainStackPointer ) \ + __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) ) + +/** + * @brief Set the CONTROL register to the given value. + */ +#define secureportSET_CONTROL( ulControl ) \ + __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" ) + +/** + * @brief Read the Interrupt Program Status Register (IPSR) value in the given + * variable. + */ +#define secureportREAD_IPSR( ulIPSR ) \ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) ) + +/** + * @brief PRIMASK value to enable interrupts. + */ +#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0 + +/** + * @brief PRIMASK value to disable interrupts. + */ +#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1 + +/** + * @brief Disable secure interrupts. + */ +#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Disable non-secure interrupts. + * + * This effectively disables context switches. + */ +#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Enable non-secure interrupts. + */ +#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL ) + +/** + * @brief Assert definition. + */ +#define secureportASSERT( x ) \ + if( ( x ) == 0 ) \ + { \ + secureportDISABLE_SECURE_INTERRUPTS(); \ + secureportDISABLE_NON_SECURE_INTERRUPTS(); \ + for( ; ; ) {; } \ + } + +#endif /* __SECURE_PORT_MACROS_H__ */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c new file mode 100644 index 00000000000..9976daee49a --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c @@ -0,0 +1,1261 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. 
*/ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. 
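+ *
+ * Note: RBAR_A1..A3 and RLAR_A1..A3 are the ARMv8-M MPU alias registers. They
+ * give access to the region selected by RNR and the next three regions in its
+ * naturally aligned group of four, so one RNR write followed by an eight-word
+ * store multiple programs four regions at once - which is how the context
+ * switch code reloads the per-task regions.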
+ */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 94UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. 
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. 
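+ *
+ * A minimal sketch of an application-supplied override (hedged example; the
+ * vendor timer and its start function are assumptions, not part of this port):
+ *
+ *     void vPortSetupTimerInterrupt( void )
+ *     {
+ *         // Use a hypothetical vendor low-power timer instead of the SysTick.
+ *         // Its interrupt must call SysTick_Handler() (or xTaskIncrementTick()
+ *         // plus a PendSV request) at configTICK_RATE_HZ.
+ *         vVendorTimerStart( configTICK_RATE_HZ, SysTick_Handler );
+ *     }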
+ */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. 
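+             *
+             * Worked example with assumed figures: if ulTimerCountsForOneTick
+             * is 1000 (e.g. a 1 MHz SysTick clock with a 1 kHz tick), 400
+             * decrements remain in the current period and xExpectedIdleTime is
+             * 5 ticks, then the reload value calculated below is
+             * 400 + ( 1000 * ( 5 - 1 ) ) = 4400 counts, before the
+             * stopped-timer compensation is subtracted.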
*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. 
*/ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. 
*/ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ + vTaskStepTick( ulCompleteTickPeriods ); + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
*/ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. 
*/ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
*/ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
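+             *
+             * The SVC number was extracted above from the byte at ulPC - 2
+             * because the Thumb SVC instruction encodes its immediate in its
+             * low byte, immediately preceding the stacked return address.
+             * Reaching this case therefore indicates an unsupported or
+             * corrupted request, so stop here.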
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
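+                         * CONTROL bit 1 (SPSEL) selects the process stack and
+                         * bit 0 (nPRIV) marks the task unprivileged, so 0x2
+                         * starts a privileged task on the PSP and 0x3 an
+                         * unprivileged one; the value is written to CONTROL
+                         * when this task's context is first restored.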
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
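+         *
+         * A hedged sketch of how xRegions reaches this function (assumed
+         * application code; the names are illustrative):
+         *
+         *     static StackType_t xTaskStack[ 256 ];   - 32-byte aligned stack buffer
+         *     static uint8_t ucSharedBuffer[ 32 ];    - data the task may access
+         *     static const TaskParameters_t xATaskParameters =
+         *     {
+         *         .pvTaskCode     = vATaskFunction,
+         *         .pcName         = "ATask",
+         *         .usStackDepth   = 256,
+         *         .uxPriority     = 1,
+         *         .puxStackBuffer = xTaskStack,
+         *         .xRegions       =
+         *         {
+         *             { ucSharedBuffer, 32, tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER },
+         *             { 0,              0,  0 },
+         *             { 0,              0,  0 }
+         *         }
+         *     };
+         *
+         * xTaskCreateRestricted( &xATaskParameters, NULL ) then passes that
+         * xRegions array through to this function.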
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. 
+ * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s new file mode 100644 index 00000000000..581b84d4951 --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s @@ -0,0 +1,262 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. 
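+For example, an assumed FreeRTOSConfig.h fragment would be written as:
+
+    #ifdef __ICCARM__
+        extern uint32_t SystemCoreClock;
+        #define configCPU_CLOCK_HZ    ( SystemCoreClock )
+    #endif
+
+so that the assembler only ever sees plain numeric definitions such as
+configMAX_SYSCALL_INTERRUPT_PRIORITY.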
*/ +#include "FreeRTOSConfig.h" + + EXTERN pxCurrentTCB + EXTERN vTaskSwitchContext + EXTERN vPortSVCHandler_C + + PUBLIC xIsPrivileged + PUBLIC vResetPrivilege + PUBLIC vRestoreContextOfFirstTask + PUBLIC vRaisePrivilege + PUBLIC vStartFirstTask + PUBLIC ulSetInterruptMask + PUBLIC vClearInterruptMask + PUBLIC PendSV_Handler + PUBLIC SVC_Handler +/*-----------------------------------------------------------*/ + +/*---------------- Unprivileged Functions -------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION .text:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +xIsPrivileged: + mrs r0, control /* r0 = CONTROL. */ + tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + ite ne + movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vResetPrivilege: + mrs r0, control /* r0 = CONTROL. */ + orr r0, r0, #1 /* r0 = r0 | 1. */ + msr control, r0 /* CONTROL = r0. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +/*----------------- Privileged Functions --------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION privileged_functions:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +vRestoreContextOfFirstTask: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r3, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r3, #4 /* r3 = 4. */ + str r3, [r2] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ + msr psplim, r1 /* Set this task's PSPLIM value. */ + msr control, r2 /* Set this task's CONTROL value. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
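+                                         * Because this code runs from the SVC handler, branching to the
+                                         * EXC_RETURN value held in r3 below performs an exception return:
+                                         * the hardware unstacks R0-R3, R12, LR, PC and xPSR from the PSP
+                                         * and drops to Thread mode, so execution resumes at the task's
+                                         * entry point (descriptive note on ARMv8-M exception return).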
*/ + bx r3 /* Finally, branch to EXC_RETURN. */ +#else /* configENABLE_MPU */ + ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + msr psplim, r1 /* Set this task's PSPLIM value. */ + movs r1, #2 /* r1 = 2. */ + msr CONTROL, r1 /* Switch to use PSP in the thread mode. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r2 /* Finally, branch to EXC_RETURN. */ +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +vRaisePrivilege: + mrs r0, control /* Read the CONTROL register. */ + bic r0, r0, #1 /* Clear the bit 0. */ + msr control, r0 /* Write back the new CONTROL value. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vStartFirstTask: + ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */ + ldr r0, [r0] /* The first entry in vector table is stack pointer. */ + msr msp, r0 /* Set the MSP back to the start of the stack. */ + cpsie i /* Globally enable interrupts. */ + cpsie f + dsb + isb + svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */ +/*-----------------------------------------------------------*/ + +ulSetInterruptMask: + mrs r0, basepri /* r0 = basepri. Return original basepri value. */ + mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vClearInterruptMask: + msr basepri, r0 /* basepri = ulMask. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +PendSV_Handler: + mrs r0, psp /* Read PSP in r0. */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ +#if ( configENABLE_MPU == 1 ) + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r2, control /* r2 = CONTROL. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ +#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ +#endif /* configENABLE_MPU */ + + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + str r0, [r1] /* Save the new top of stack in TCB. */ + + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. 
*/ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r3, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r3, #4 /* r3 = 4. */ + str r3, [r2] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ +#else /* configENABLE_MPU */ + ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ +#endif /* configENABLE_MPU */ + +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ + + #if ( configENABLE_MPU == 1 ) + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + msr control, r2 /* Restore the CONTROL register value for the task. */ +#else /* configENABLE_MPU */ + msr psplim, r2 /* Restore the PSPLIM register value for the task. */ +#endif /* configENABLE_MPU */ + msr psp, r0 /* Remember the new top of stack for the task. */ + bx r3 +/*-----------------------------------------------------------*/ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h new file mode 100644 index 00000000000..a0efc1f9dcf --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -0,0 +1,78 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..ca7e9225c05 --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,313 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. 
+ */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. 
If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c old mode 100644 new mode 100755 index 243d0578585..05d5be0aa65 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -65,8 +65,9 @@ #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -239,10 +240,6 @@ static void prvTaskExitError( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - /* This port can be used on all revisions of the Cortex-M7 core other than * the r0p1 parts. r0p1 parts should use the port from the * /source/portable/GCC/ARM_CM7/r0p1 directory. */ @@ -251,7 +248,8 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -261,7 +259,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -273,22 +271,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. 
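The check being introduced here rejects any configMAX_SYSCALL_INTERRUPT_PRIORITY whose implemented priority bits are all zero. One common way to define the value in FreeRTOSConfig.h so that it always lands in the implemented (most significant) bits is sketched below; the numbers are examples only, configPRIO_BITS must match the device, and configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY is just a helper name used for this sketch:

    /* Illustrative FreeRTOSConfig.h fragment. */
    #define configPRIO_BITS                                  4    /* Typically __NVIC_PRIO_BITS from the CMSIS device header. */
    #define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY     5    /* Logical priority, 0 to ( 2 ^ configPRIO_BITS ) - 1. */

    /* Shift the logical priority into the implemented bits of the 8-bit
     * priority field, giving 0x50 here: non-zero after masking, and with the
     * sub-priority bit clear. */
    #define configMAX_SYSCALL_INTERRUPT_PRIORITY \
        ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )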
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -297,7 +329,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -308,7 +340,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/IAR/ARM_CM4F/portmacro.h b/portable/IAR/ARM_CM4F/portmacro.h index e6c8726f5f8..148c81d14ee 100644 --- a/portable/IAR/ARM_CM4F/portmacro.h +++ b/portable/IAR/ARM_CM4F/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c old mode 100644 new mode 100755 index 2f4d1b9e349..69b7bc5d9bc --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -104,8 +104,9 @@ #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) #define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ @@ -193,7 +194,7 @@ extern void vPortRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION; /** * @brief Enter critical section. */ -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) +#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL; #else void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; @@ -202,7 +203,7 @@ extern void vPortRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION; /** * @brief Exit from critical section. */ -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) +#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL; #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; @@ -315,9 +316,9 @@ void vPortSVCHandler_C( uint32_t * pulParam ) { __asm volatile ( - " mrs r1, control \n"/* Obtain current control value. */ - " bic r1, r1, #1 \n"/* Set privilege bit. */ - " msr control, r1 \n"/* Write back new control value. */ + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, r1, #1 \n" /* Set privilege bit. */ + " msr control, r1 \n" /* Write back new control value. */ ::: "r1", "memory" ); } @@ -327,9 +328,9 @@ void vPortSVCHandler_C( uint32_t * pulParam ) case portSVC_RAISE_PRIVILEGE: __asm volatile ( - " mrs r1, control \n"/* Obtain current control value. */ - " bic r1, r1, #1 \n"/* Set privilege bit. */ - " msr control, r1 \n"/* Write back new control value. */ + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, r1, #1 \n" /* Set privilege bit. */ + " msr control, r1 \n" /* Write back new control value. */ ::: "r1", "memory" ); break; @@ -346,15 +347,12 @@ void vPortSVCHandler_C( uint32_t * pulParam ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - /* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0 * and r0p1 cores. */ #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) configASSERT( ( portCPUID == portCORTEX_M7_r0p1_ID ) || ( portCPUID == portCORTEX_M7_r0p0_ID ) ); #else + /* When using this port on a Cortex-M7 r0p0 or r0p1 core, define * configENABLE_ERRATA_837070_WORKAROUND to 1 in your * FreeRTOSConfig.h. 
*/ @@ -363,66 +361,101 @@ BaseType_t xPortStartScheduler( void ) #endif #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ucOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif - - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } + #endif + + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ucOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -463,32 +496,49 @@ void vPortEndScheduler( void ) void vPortEnterCritical( void ) { -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); + #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + + /* This is not the interrupt safe version of the enter critical function so + * assert() if it is being called from an interrupt context. Only API + * functions that end in "FromISR" can be used in an interrupt. 
Only assert if + * the critical nesting count is 1 to protect against recursive calls if the + * assert function also uses a critical section. */ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; - /* This is not the interrupt safe version of the enter critical function so - * assert() if it is being called from an interrupt context. Only API - * functions that end in "FromISR" can be used in an interrupt. Only assert if - * the critical nesting count is 1 to protect against recursive calls if the - * assert function also uses a critical section. */ - if( uxCriticalNesting == 1 ) + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else { - configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + + /* This is not the interrupt safe version of the enter critical function so + * assert() if it is being called from an interrupt context. Only API + * functions that end in "FromISR" can be used in an interrupt. Only assert if + * the critical nesting count is 1 to protect against recursive calls if the + * assert function also uses a critical section. */ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } } - portMEMORY_BARRIER(); - - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { + #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ portDISABLE_INTERRUPTS(); uxCriticalNesting++; + /* This is not the interrupt safe version of the enter critical function so * assert() if it is being called from an interrupt context. Only API * functions that end in "FromISR" can be used in an interrupt. Only assert if @@ -498,45 +548,42 @@ void vPortEnterCritical( void ) { configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); } - } -#else - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; - /* This is not the interrupt safe version of the enter critical function so - * assert() if it is being called from an interrupt context. Only API - * functions that end in "FromISR" can be used in an interrupt. Only assert if - * the critical nesting count is 1 to protect against recursive calls if the - * assert function also uses a critical section. 
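For reference, both branches above preserve the same nesting behaviour; a minimal sketch from a privileged task's point of view, where uxCriticalNesting is this port's file-scope counter:

    portENTER_CRITICAL();    /* uxCriticalNesting: 0 -> 1, interrupts at or below
                              * configMAX_SYSCALL_INTERRUPT_PRIORITY are masked. */
    portENTER_CRITICAL();    /* uxCriticalNesting: 1 -> 2, still masked. */
    portEXIT_CRITICAL();     /* uxCriticalNesting: 2 -> 1, still masked. */
    portEXIT_CRITICAL();     /* uxCriticalNesting: 1 -> 0, interrupts unmasked again. */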
*/ - if( uxCriticalNesting == 1 ) - { - configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); - } -#endif + #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ } /*-----------------------------------------------------------*/ void vPortExitCritical( void ) { -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); + #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; - if( uxCriticalNesting == 0 ) - { - portENABLE_INTERRUPTS(); + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); } - portMEMORY_BARRIER(); + else + { + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + } + #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ configASSERT( uxCriticalNesting ); uxCriticalNesting--; @@ -544,16 +591,7 @@ void vPortExitCritical( void ) { portENABLE_INTERRUPTS(); } - } -#else - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; - - if( uxCriticalNesting == 0 ) - { - portENABLE_INTERRUPTS(); - } -#endif + #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ } /*-----------------------------------------------------------*/ @@ -705,7 +743,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress = ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */ ( portMPU_REGION_VALID ) | - ( portSTACK_REGION ); /* Region number. */ + ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | diff --git a/portable/IAR/ARM_CM4F_MPU/portmacro.h b/portable/IAR/ARM_CM4F_MPU/portmacro.h index 1e44234fae4..96787e7c31f 100644 --- a/portable/IAR/ARM_CM4F_MPU/portmacro.h +++ b/portable/IAR/ARM_CM4F_MPU/portmacro.h @@ -62,16 +62,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c old mode 100644 new mode 100755 index 207f0b3eb67..9217653a7d4 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -59,8 +59,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -233,13 +234,10 @@ static void prvTaskExitError( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. 
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -249,7 +247,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -261,22 +259,56 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
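A worked numeric pass through the probe above may help; the values are illustrative and assume a device with 4 implemented priority bits (uint8_t/uint32_t from stdint.h):

    uint8_t ucMaxPriorityValue = 0xF0;    /* Read back after writing 0xFF to the first user priority register. */
    uint32_t ulImplementedPrioBits = 0;

    while( ( ucMaxPriorityValue & 0x80 ) == 0x80 )    /* portTOP_BIT_OF_BYTE. */
    {
        ulImplementedPrioBits++;    /* Ends at 4 in this example. */
        ucMaxPriorityValue <<= 1;
    }

    /* With 4 implemented bits, configMAX_SYSCALL_INTERRUPT_PRIORITY == 0x05
     * masks to 0x00 and trips configASSERT( ucMaxSysCallPriority ), while 0x50
     * masks to 0x50 and passes. ulMaxPRIGROUPValue then becomes
     * portMAX_PRIGROUP_BITS - 4, i.e. 3 when portMAX_PRIGROUP_BITS is 7. */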
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -285,7 +317,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -296,7 +328,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/IAR/ARM_CM7/r0p1/portmacro.h b/portable/IAR/ARM_CM7/r0p1/portmacro.h index 813cc545a4c..08867c289c3 100644 --- a/portable/IAR/ARM_CM7/r0p1/portmacro.h +++ b/portable/IAR/ARM_CM7/r0p1/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ATMega323/portmacro.h b/portable/IAR/ATMega323/portmacro.h index 973d6d9cac9..111f4d02667 100644 --- a/portable/IAR/ATMega323/portmacro.h +++ b/portable/IAR/ATMega323/portmacro.h @@ -64,12 +64,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AVR32_UC3/portmacro.h b/portable/IAR/AVR32_UC3/portmacro.h index f2d02e2fa7e..eb48b59b589 100644 --- a/portable/IAR/AVR32_UC3/portmacro.h +++ b/portable/IAR/AVR32_UC3/portmacro.h @@ -112,12 +112,14 @@ typedef unsigned long UBaseType_t; #define configTICK_TC_IRQ ATPASTE2(AVR32_TC_IRQ, configTICK_TC_CHANNEL) -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AVR_AVRDx/portmacro.h b/portable/IAR/AVR_AVRDx/portmacro.h index d02c691bd86..5b4b5be8b3c 100644 --- a/portable/IAR/AVR_AVRDx/portmacro.h +++ b/portable/IAR/AVR_AVRDx/portmacro.h @@ -60,12 +60,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AVR_Mega0/portmacro.h b/portable/IAR/AVR_Mega0/portmacro.h index d02c691bd86..5b4b5be8b3c 100644 --- a/portable/IAR/AVR_Mega0/portmacro.h +++ b/portable/IAR/AVR_Mega0/portmacro.h @@ -60,12 +60,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AtmelSAM7S64/portmacro.h b/portable/IAR/AtmelSAM7S64/portmacro.h index 3c651b0398e..173d3fbaa2d 100644 --- a/portable/IAR/AtmelSAM7S64/portmacro.h +++ b/portable/IAR/AtmelSAM7S64/portmacro.h @@ -57,12 +57,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AtmelSAM9XE/portmacro.h b/portable/IAR/AtmelSAM9XE/portmacro.h index 623a7b060db..3d1d82ec17b 100644 --- a/portable/IAR/AtmelSAM9XE/portmacro.h +++ b/portable/IAR/AtmelSAM9XE/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/LPC2000/portmacro.h b/portable/IAR/LPC2000/portmacro.h index e7fd1097b78..f41d8b3445e 100644 --- a/portable/IAR/LPC2000/portmacro.h +++ b/portable/IAR/LPC2000/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/MSP430/portmacro.h b/portable/IAR/MSP430/portmacro.h index 8659c4f02fa..298307f5519 100644 --- a/portable/IAR/MSP430/portmacro.h +++ b/portable/IAR/MSP430/portmacro.h @@ -53,12 +53,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/MSP430X/portmacro.h b/portable/IAR/MSP430X/portmacro.h index d7d60480456..1ab4665b9bf 100644 --- a/portable/IAR/MSP430X/portmacro.h +++ b/portable/IAR/MSP430X/portmacro.h @@ -62,12 +62,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RL78/portmacro.h b/portable/IAR/RL78/portmacro.h index 7467c33f18a..9c74a3f0452 100644 --- a/portable/IAR/RL78/portmacro.h +++ b/portable/IAR/RL78/portmacro.h @@ -75,12 +75,14 @@ typedef unsigned short UBaseType_t; #endif -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef unsigned int TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX100/portmacro.h b/portable/IAR/RX100/portmacro.h index ff1dc0eb92d..d408a1389a5 100644 --- a/portable/IAR/RX100/portmacro.h +++ b/portable/IAR/RX100/portmacro.h @@ -64,16 +64,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX600/portmacro.h b/portable/IAR/RX600/portmacro.h index e80f52a63ca..e5decdaf34a 100644 --- a/portable/IAR/RX600/portmacro.h +++ b/portable/IAR/RX600/portmacro.h @@ -61,16 +61,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX700v3_DPFPU/portmacro.h b/portable/IAR/RX700v3_DPFPU/portmacro.h index 8538b152b49..e0204fa0c3c 100644 --- a/portable/IAR/RX700v3_DPFPU/portmacro.h +++ b/portable/IAR/RX700v3_DPFPU/portmacro.h @@ -79,16 +79,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RXv2/portmacro.h b/portable/IAR/RXv2/portmacro.h index 07444eb35ae..984a4fb50e2 100644 --- a/portable/IAR/RXv2/portmacro.h +++ b/portable/IAR/RXv2/portmacro.h @@ -61,16 +61,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/STR71x/portmacro.h b/portable/IAR/STR71x/portmacro.h index 492bfb2ae8b..119eec84786 100644 --- a/portable/IAR/STR71x/portmacro.h +++ b/portable/IAR/STR71x/portmacro.h @@ -61,12 +61,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/STR75x/portmacro.h b/portable/IAR/STR75x/portmacro.h index b852c644120..674505d3f0c 100644 --- a/portable/IAR/STR75x/portmacro.h +++ b/portable/IAR/STR75x/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/STR91x/portmacro.h b/portable/IAR/STR91x/portmacro.h index a8b24da878f..43ea6d7e8dc 100644 --- a/portable/IAR/STR91x/portmacro.h +++ b/portable/IAR/STR91x/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/V850ES/portmacro.h b/portable/IAR/V850ES/portmacro.h index 0381f024471..76b1186761d 100644 --- a/portable/IAR/V850ES/portmacro.h +++ b/portable/IAR/V850ES/portmacro.h @@ -57,12 +57,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if (configUSE_16_BIT_TICKS==1) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC18F/portmacro.h b/portable/MPLAB/PIC18F/portmacro.h index 9b1e44e6a8a..6fef1afe2dd 100644 --- a/portable/MPLAB/PIC18F/portmacro.h +++ b/portable/MPLAB/PIC18F/portmacro.h @@ -52,12 +52,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC24_dsPIC/portmacro.h b/portable/MPLAB/PIC24_dsPIC/portmacro.h index 2a6f44b18ec..83b695a43bb 100644 --- a/portable/MPLAB/PIC24_dsPIC/portmacro.h +++ b/portable/MPLAB/PIC24_dsPIC/portmacro.h @@ -51,20 +51,24 @@ extern "C" { #define portSHORT short #define portSTACK_TYPE uint16_t #define portBASE_TYPE short +#define portPOINTER_SIZE_TYPE size_t +#define SIZE_MAX ( ( size_t ) -1 ) typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff /* 16-bit tick type on a 16-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
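The PIC24/dsPIC hunk also maps portPOINTER_SIZE_TYPE to size_t and defines SIZE_MAX, presumably because the toolchain headers do not provide it. A small illustration, with hypothetical values, of why the pointer-size type matters on a 16-bit device (the 2-byte alignment and the helper name are assumptions for this sketch, not taken from the patch):

    #include <stddef.h>

    static unsigned char ucExampleHeap[ 128 ];

    /* Heap start alignment as done in heap_4.c/heap_5.c: on a 16-bit part the
     * address arithmetic must use a 16-bit portPOINTER_SIZE_TYPE (size_t
     * here), or the masks below would operate on the wrong width. */
    static size_t prvExampleAlignedHeapStart( void )
    {
        size_t uxAddress = ( size_t ) ucExampleHeap;

        uxAddress += ( 2U - 1U );            /* portBYTE_ALIGNMENT assumed 2. */
        uxAddress &= ~( ( size_t ) 0x01U );  /* portBYTE_ALIGNMENT_MASK.      */

        return uxAddress;
    }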
#endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC32MEC14xx/portmacro.h b/portable/MPLAB/PIC32MEC14xx/portmacro.h index dad5aa3530f..d9aad5df0aa 100644 --- a/portable/MPLAB/PIC32MEC14xx/portmacro.h +++ b/portable/MPLAB/PIC32MEC14xx/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC32MX/portmacro.h b/portable/MPLAB/PIC32MX/portmacro.h index a81dbf341d6..e2a1078e631 100644 --- a/portable/MPLAB/PIC32MX/portmacro.h +++ b/portable/MPLAB/PIC32MX/portmacro.h @@ -59,16 +59,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC32MZ/portmacro.h b/portable/MPLAB/PIC32MZ/portmacro.h index 43d65985504..17b266e1124 100644 --- a/portable/MPLAB/PIC32MZ/portmacro.h +++ b/portable/MPLAB/PIC32MZ/portmacro.h @@ -59,16 +59,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MSVC-MingW/port.c b/portable/MSVC-MingW/port.c index 67b79baa917..f39f0ecbb88 100644 --- a/portable/MSVC-MingW/port.c +++ b/portable/MSVC-MingW/port.c @@ -160,7 +160,7 @@ TIMECAPS xTimeCaps; /* Just to prevent compiler warnings. */ ( void ) lpParameter; - for( ;; ) + while( xPortRunning == pdTRUE ) { /* Wait until the timer expires and we can access the simulated interrupt variables. 
*NOTE* this is not a 'real time' way of generating tick @@ -177,32 +177,32 @@ TIMECAPS xTimeCaps; Sleep( portTICK_PERIOD_MS ); } - configASSERT( xPortRunning ); + if( xPortRunning == pdTRUE ) + { + configASSERT( xPortRunning ); - /* Can't proceed if in a critical section as pvInterruptEventMutex won't - be available. */ - WaitForSingleObject( pvInterruptEventMutex, INFINITE ); + /* Can't proceed if in a critical section as pvInterruptEventMutex won't + be available. */ + WaitForSingleObject( pvInterruptEventMutex, INFINITE ); - /* The timer has expired, generate the simulated tick event. */ - ulPendingInterrupts |= ( 1 << portINTERRUPT_TICK ); + /* The timer has expired, generate the simulated tick event. */ + ulPendingInterrupts |= ( 1 << portINTERRUPT_TICK ); - /* The interrupt is now pending - notify the simulated interrupt - handler thread. Must be outside of a critical section to get here so - the handler thread can execute immediately pvInterruptEventMutex is - released. */ - configASSERT( ulCriticalNesting == 0UL ); - SetEvent( pvInterruptEvent ); + /* The interrupt is now pending - notify the simulated interrupt + handler thread. Must be outside of a critical section to get here so + the handler thread can execute immediately pvInterruptEventMutex is + released. */ + configASSERT( ulCriticalNesting == 0UL ); + SetEvent( pvInterruptEvent ); - /* Give back the mutex so the simulated interrupt handler unblocks - and can access the interrupt handler variables. */ - ReleaseMutex( pvInterruptEventMutex ); + /* Give back the mutex so the simulated interrupt handler unblocks + and can access the interrupt handler variables. */ + ReleaseMutex( pvInterruptEventMutex ); + } } - #ifdef __GNUC__ - /* Should never reach here - MingW complains if you leave this line out, - MSVC complains if you put it in. */ - return 0; - #endif + + return 0; } /*-----------------------------------------------------------*/ @@ -566,7 +566,7 @@ uint32_t ulErrorCode; void vPortEndScheduler( void ) { - exit( 0 ); + xPortRunning = pdFALSE; } /*-----------------------------------------------------------*/ diff --git a/portable/MSVC-MingW/portmacro.h b/portable/MSVC-MingW/portmacro.h index 3cbb06b5533..b1282b3d71e 100644 --- a/portable/MSVC-MingW/portmacro.h +++ b/portable/MSVC-MingW/portmacro.h @@ -50,16 +50,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32/64-bit tick type on a 32/64-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /* Hardware specifics. */ diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index f97a1edc12c..c7a8209ed2b 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -96,8 +96,8 @@ * of their memory address. */ typedef struct A_BLOCK_LINK { - struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */ - size_t xBlockSize; /*<< The size of the free block. */ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. 
*/ } BlockLink_t; /*-----------------------------------------------------------*/ @@ -159,13 +159,31 @@ void * pvPortMalloc( size_t xWantedSize ) if( xWantedSize > 0 ) { /* The wanted size must be increased so it can contain a BlockLink_t - * structure in addition to the requested amount of bytes. Some - * additional increment may also be needed for alignment. */ - xAdditionalRequiredSize = xHeapStructSize + portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); - - if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + * structure in addition to the requested amount of bytes. */ + if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 ) { - xWantedSize += xAdditionalRequiredSize; + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + * of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); + + if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + { + xWantedSize += xAdditionalRequiredSize; + } + else + { + xWantedSize = 0; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { @@ -390,7 +408,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ { uxAddress += ( portBYTE_ALIGNMENT - 1 ); uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); - xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap; + xTotalHeapSize -= ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap ); } pucAlignedHeap = ( uint8_t * ) uxAddress; @@ -402,7 +420,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* pxEnd is used to mark the end of the list of free blocks and is inserted * at the end of the heap space. */ - uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize ); uxAddress -= xHeapStructSize; uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); pxEnd = ( BlockLink_t * ) uxAddress; diff --git a/portable/MemMang/heap_5.c b/portable/MemMang/heap_5.c index 3a1df9b64c9..db9e1eb37ad 100644 --- a/portable/MemMang/heap_5.c +++ b/portable/MemMang/heap_5.c @@ -170,13 +170,31 @@ void * pvPortMalloc( size_t xWantedSize ) if( xWantedSize > 0 ) { /* The wanted size must be increased so it can contain a BlockLink_t - * structure in addition to the requested amount of bytes. Some - * additional increment may also be needed for alignment. */ - xAdditionalRequiredSize = xHeapStructSize + portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); - - if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + * structure in addition to the requested amount of bytes. */ + if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 ) { - xWantedSize += xAdditionalRequiredSize; + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + * of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. 
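The reworked pvPortMalloc() size calculation now validates each addition separately before applying it. A worked example with hypothetical numbers (an 8-byte BlockLink_t header and 8-byte alignment are assumed for illustration):

    #include <stddef.h>

    /* Mirrors the new rounding logic above; the real code additionally guards
     * both additions with heapADD_WILL_OVERFLOW() and forces xWantedSize to 0
     * if either would wrap. */
    static size_t prvExampleRoundedSize( size_t xWantedSize )   /* e.g. 30    */
    {
        const size_t xHeapStructSize = 8U;
        const size_t xAlignmentMask = 0x07U;

        xWantedSize += xHeapStructSize;                         /* 30 -> 38   */

        if( ( xWantedSize & xAlignmentMask ) != 0U )
        {
            /* 38 & 0x07 = 6, so 8 - 6 = 2 padding bytes are added. */
            xWantedSize += ( xAlignmentMask + 1U ) - ( xWantedSize & xAlignmentMask );
        }

        return xWantedSize;                                     /* 40 bytes   */
    }

The padding is now only added when the header-inclusive size is actually misaligned, so already-aligned requests no longer grow by a full extra alignment step.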
*/ + xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); + + if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + { + xWantedSize += xAdditionalRequiredSize; + } + else + { + xWantedSize = 0; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c old mode 100644 new mode 100755 index a0b83ebd30b..1936de19447 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -48,8 +48,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -295,13 +296,10 @@ static void prvPortStartFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -311,7 +309,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -320,29 +318,59 @@ BaseType_t xPortStartScheduler( void ) /* Read the value back to see how many bits stuck. */ ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -351,7 +379,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -362,7 +390,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/MikroC/ARM_CM4F/portmacro.h b/portable/MikroC/ARM_CM4F/portmacro.h index 1fa3d796403..fa614c31488 100644 --- a/portable/MikroC/ARM_CM4F/portmacro.h +++ b/portable/MikroC/ARM_CM4F/portmacro.h @@ -62,16 +62,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
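The ulImplementedPrioBits logic above replaces the old countdown from portMAX_PRIGROUP_BITS with a direct count of the priority bits the NVIC actually implements. A short sketch with a hypothetical 4-bit device (reading 0xF0 back after 0xFF is written; portTOP_BIT_OF_BYTE being 0x80 and portMAX_PRIGROUP_BITS being 7 are assumptions here, not quoted from the patch):

    #include <stdint.h>

    static uint32_t prvExampleCountPrioBits( void )
    {
        uint8_t ucMaxPriorityValue = 0xF0U;   /* Value read back from the NVIC. */
        uint32_t ulImplementedPrioBits = 0U;

        while( ( ucMaxPriorityValue & 0x80U ) == 0x80U )
        {
            ulImplementedPrioBits++;          /* Ends at 4 for this device.     */
            ucMaxPriorityValue <<= 1U;
        }

        /* ulMaxPRIGROUPValue = 7 - 4 = 3: all four implemented bits are used
         * as preemption priority, none as sub-priority. */
        return ulImplementedPrioBits;
    }

Only when all eight bits stick does the special case apply, forcing ulMaxPRIGROUPValue to 0 and asserting that configMAX_SYSCALL_INTERRUPT_PRIORITY has its sub-priority bit clear.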
#endif /*-----------------------------------------------------------*/ diff --git a/portable/Paradigm/Tern_EE/large_untested/portmacro.h b/portable/Paradigm/Tern_EE/large_untested/portmacro.h index 753686c5a2c..ed513309c7c 100644 --- a/portable/Paradigm/Tern_EE/large_untested/portmacro.h +++ b/portable/Paradigm/Tern_EE/large_untested/portmacro.h @@ -57,12 +57,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Paradigm/Tern_EE/small/portmacro.h b/portable/Paradigm/Tern_EE/small/portmacro.h index 5918b04be60..9eb5b863023 100644 --- a/portable/Paradigm/Tern_EE/small/portmacro.h +++ b/portable/Paradigm/Tern_EE/small/portmacro.h @@ -59,12 +59,14 @@ typedef unsigned short UBaseType_t; typedef void ( __interrupt __far *pxISR )(); -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM7_LPC21xx/portmacro.h b/portable/RVDS/ARM7_LPC21xx/portmacro.h index 821d2e2f26e..95043eb8deb 100644 --- a/portable/RVDS/ARM7_LPC21xx/portmacro.h +++ b/portable/RVDS/ARM7_LPC21xx/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CA9/portmacro.h b/portable/RVDS/ARM_CA9/portmacro.h index a092952a940..65dd8174f7c 100644 --- a/portable/RVDS/ARM_CA9/portmacro.h +++ b/portable/RVDS/ARM_CA9/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. 
*/ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM0/portmacro.h b/portable/RVDS/ARM_CM0/portmacro.h index c6cd9e859c1..54165e74cc4 100644 --- a/portable/RVDS/ARM_CM0/portmacro.h +++ b/portable/RVDS/ARM_CM0/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c old mode 100644 new mode 100755 index 551fb3441b5..2ffdd9cc8bd --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -34,10 +34,6 @@ #include "FreeRTOS.h" #include "task.h" -#ifndef configKERNEL_INTERRUPT_PRIORITY - #define configKERNEL_INTERRUPT_PRIORITY 255 -#endif - #if configMAX_SYSCALL_INTERRUPT_PRIORITY == 0 #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif @@ -65,8 +61,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -266,7 +263,8 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -276,7 +274,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -285,29 +283,59 @@ BaseType_t xPortStartScheduler( void ) /* Read the value back to see how many bits stuck. */ ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. 
*/ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -316,7 +344,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -327,7 +355,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. 
*/ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/RVDS/ARM_CM3/portmacro.h b/portable/RVDS/ARM_CM3/portmacro.h index 5ecfb7b0e8b..c23a6b3fdbd 100644 --- a/portable/RVDS/ARM_CM3/portmacro.h +++ b/portable/RVDS/ARM_CM3/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c old mode 100644 new mode 100755 index d6fb9d7a19c..bf2fb86f76a --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -71,8 +71,9 @@ #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -328,7 +329,8 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -338,7 +340,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -347,29 +349,59 @@ BaseType_t xPortStartScheduler( void ) /* Read the value back to see how many bits stuck. */ ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. 
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -378,7 +410,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -389,7 +421,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/RVDS/ARM_CM4F/portmacro.h b/portable/RVDS/ARM_CM4F/portmacro.h index 1b3f93b0e97..de92c8d4bd9 100644 --- a/portable/RVDS/ARM_CM4F/portmacro.h +++ b/portable/RVDS/ARM_CM4F/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
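configASSERT( ucMaxSysCallPriority ) traps configurations in which configMAX_SYSCALL_INTERRUPT_PRIORITY collapses to zero once truncated to the implemented priority bits, which would otherwise set BASEPRI to 0 and mask nothing. An illustrative FreeRTOSConfig.h sketch that passes the check on a 4-priority-bit part (the configLIBRARY_* name and the left-shift convention are taken from common Cortex-M demos, not from this patch):

    /* FreeRTOSConfig.h sketch for a device implementing 4 priority bits. */
    #define configPRIO_BITS                                 4
    #define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY    5

    /* Shift the logical priority into the implemented (most-significant)
     * bits: 5 << ( 8 - 4 ) = 0x50. */
    #define configMAX_SYSCALL_INTERRUPT_PRIORITY \
        ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )

Masked with the implemented bits (0xF0), 0x50 stays non-zero and the assert passes; an unshifted value such as 0x05 masks to 0 and now fails at start-up instead of silently disabling interrupt masking.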
#endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c old mode 100644 new mode 100755 index 22680d77623..e0bd8c86d0f --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -83,8 +83,9 @@ #define portNVIC_SYSTICK_CLK ( 0x00000004UL ) #define portNVIC_SYSTICK_INT ( 0x00000002UL ) #define portNVIC_SYSTICK_ENABLE ( 0x00000001UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) #define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL ) /* Constants required to manipulate the VFP. */ @@ -200,7 +201,7 @@ void vResetPrivilege( void ); /** * @brief Enter critical section. */ -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) +#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL; #else void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; @@ -209,7 +210,7 @@ void vResetPrivilege( void ); /** * @brief Exit from critical section. */ -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) +#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL; #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; @@ -411,6 +412,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) configASSERT( ( portCPUID == portCORTEX_M7_r0p1_ID ) || ( portCPUID == portCORTEX_M7_r0p0_ID ) ); #else + /* When using this port on a Cortex-M7 r0p0 or r0p1 core, define * configENABLE_ERRATA_837070_WORKAROUND to 1 in your * FreeRTOSConfig.h. */ @@ -419,66 +421,101 @@ BaseType_t xPortStartScheduler( void ) #endif #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ucOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. 
*/ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif - - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. 
+ * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } + + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif + + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ucOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the same priority as the kernel, and the SVC @@ -550,53 +587,63 @@ void vPortEndScheduler( void ) void vPortEnterCritical( void ) { -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); + #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; - portMEMORY_BARRIER(); + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + portMEMORY_BARRIER(); - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + } + #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ portDISABLE_INTERRUPTS(); uxCriticalNesting++; - } -#else - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; -#endif + #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ } /*-----------------------------------------------------------*/ void vPortExitCritical( void ) { -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); + #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; - if( uxCriticalNesting == 0 ) - { - portENABLE_INTERRUPTS(); + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); } - portMEMORY_BARRIER(); + else + { + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + } + #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ configASSERT( uxCriticalNesting ); uxCriticalNesting--; @@ -604,16 +651,7 @@ void vPortExitCritical( void ) { portENABLE_INTERRUPTS(); } - } -#else - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; - - if( uxCriticalNesting == 
0 ) - { - portENABLE_INTERRUPTS(); - } -#endif + #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ } /*-----------------------------------------------------------*/ @@ -901,7 +939,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress = ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */ ( portMPU_REGION_VALID ) | - ( portSTACK_REGION ); /* Region number. */ + ( portSTACK_REGION ); /* Region number. */ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | diff --git a/portable/RVDS/ARM_CM4_MPU/portmacro.h b/portable/RVDS/ARM_CM4_MPU/portmacro.h index ac89aedac97..c7cd5628951 100644 --- a/portable/RVDS/ARM_CM4_MPU/portmacro.h +++ b/portable/RVDS/ARM_CM4_MPU/portmacro.h @@ -59,16 +59,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c old mode 100644 new mode 100755 index ae0a4a6d943..2e81e324df9 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -65,8 +65,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -312,7 +313,8 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -322,7 +324,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -331,29 +333,59 @@ BaseType_t xPortStartScheduler( void ) /* Read the value back to see how many bits stuck. */ ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. 
*/ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -362,7 +394,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -373,7 +405,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. 
*/ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/RVDS/ARM_CM7/r0p1/portmacro.h b/portable/RVDS/ARM_CM7/r0p1/portmacro.h index a46ec16bce0..a8fa6630e65 100644 --- a/portable/RVDS/ARM_CM7/r0p1/portmacro.h +++ b/portable/RVDS/ARM_CM7/r0p1/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX100/portmacro.h b/portable/Renesas/RX100/portmacro.h index cc789b2be6f..9bcbd3c8c3d 100644 --- a/portable/Renesas/RX100/portmacro.h +++ b/portable/Renesas/RX100/portmacro.h @@ -61,16 +61,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX200/portmacro.h b/portable/Renesas/RX200/portmacro.h index fc20449a7d5..62a085023e2 100644 --- a/portable/Renesas/RX200/portmacro.h +++ b/portable/Renesas/RX200/portmacro.h @@ -61,16 +61,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX600/portmacro.h b/portable/Renesas/RX600/portmacro.h index 1430b54a9f0..3b29cbddb50 100644 --- a/portable/Renesas/RX600/portmacro.h +++ b/portable/Renesas/RX600/portmacro.h @@ -61,16 +61,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX600v2/portmacro.h b/portable/Renesas/RX600v2/portmacro.h index eb12462272d..d67a8f892aa 100644 --- a/portable/Renesas/RX600v2/portmacro.h +++ b/portable/Renesas/RX600v2/portmacro.h @@ -61,16 +61,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX700v3_DPFPU/portmacro.h b/portable/Renesas/RX700v3_DPFPU/portmacro.h index da1e0b1d67e..12657ee82bf 100644 --- a/portable/Renesas/RX700v3_DPFPU/portmacro.h +++ b/portable/Renesas/RX700v3_DPFPU/portmacro.h @@ -79,16 +79,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/SH2A_FPU/portmacro.h b/portable/Renesas/SH2A_FPU/portmacro.h index 8fb7e66a536..422cd59ed48 100644 --- a/portable/Renesas/SH2A_FPU/portmacro.h +++ b/portable/Renesas/SH2A_FPU/portmacro.h @@ -60,10 +60,10 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL diff --git a/portable/Rowley/MSP430F449/portmacro.h b/portable/Rowley/MSP430F449/portmacro.h index c56e436d984..7137a6e0964 100644 --- a/portable/Rowley/MSP430F449/portmacro.h +++ b/portable/Rowley/MSP430F449/portmacro.h @@ -53,12 +53,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/SDCC/Cygnal/portmacro.h b/portable/SDCC/Cygnal/portmacro.h index f4fa20e8478..04186381f62 100644 --- a/portable/SDCC/Cygnal/portmacro.h +++ b/portable/SDCC/Cygnal/portmacro.h @@ -61,12 +61,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Softune/MB91460/portmacro.h b/portable/Softune/MB91460/portmacro.h index 2a3c6f189fb..9ae6959c46b 100644 --- a/portable/Softune/MB91460/portmacro.h +++ b/portable/Softune/MB91460/portmacro.h @@ -59,12 +59,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/Softune/MB96340/portmacro.h b/portable/Softune/MB96340/portmacro.h index c953083f13e..827874fde9d 100644 --- a/portable/Softune/MB96340/portmacro.h +++ b/portable/Softune/MB96340/portmacro.h @@ -65,12 +65,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Tasking/ARM_CM4F/port.c b/portable/Tasking/ARM_CM4F/port.c old mode 100644 new mode 100755 index d33a3982630..4ba5da739d0 --- a/portable/Tasking/ARM_CM4F/port.c +++ b/portable/Tasking/ARM_CM4F/port.c @@ -41,8 +41,9 @@ #define portNVIC_SYSTICK_CLK 0x00000004 #define portNVIC_SYSTICK_INT 0x00000002 #define portNVIC_SYSTICK_ENABLE 0x00000001 -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16 ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24 ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Masks off all bits but the VECTACTIVE bits in the ICSR register. */ #define portVECTACTIVE_MASK ( 0xFFUL ) @@ -70,7 +71,7 @@ /* The priority used by the kernel is assigned to a variable to make access * from inline assembler easier. */ -const uint32_t ulKernelPriority = configKERNEL_INTERRUPT_PRIORITY; +const uint32_t ulKernelPriority = portMIN_INTERRUPT_PRIORITY; /* Each task maintains its own interrupt status in the critical nesting * variable. */ @@ -265,5 +266,4 @@ void prvSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. */ *( portNVIC_SYSTICK_LOAD ) = ( configCPU_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; *( portNVIC_SYSTICK_CTRL ) = portNVIC_SYSTICK_CLK | portNVIC_SYSTICK_INT | portNVIC_SYSTICK_ENABLE; -} -/*-----------------------------------------------------------*/ +} \ No newline at end of file diff --git a/portable/Tasking/ARM_CM4F/portmacro.h b/portable/Tasking/ARM_CM4F/portmacro.h index a59418cb1ab..3371f34f47a 100644 --- a/portable/Tasking/ARM_CM4F/portmacro.h +++ b/portable/Tasking/ARM_CM4F/portmacro.h @@ -58,16 +58,18 @@ typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h b/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h index ac00ff54ab0..b82e48695d9 100644 --- a/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h +++ b/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; typedef void (*portvectorfunc)(void); -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/ThirdParty/GCC/ARC_EM_HS/port.c b/portable/ThirdParty/GCC/ARC_EM_HS/port.c index 57be56a7f5f..0e023088efd 100644 --- a/portable/ThirdParty/GCC/ARC_EM_HS/port.c +++ b/portable/ThirdParty/GCC/ARC_EM_HS/port.c @@ -251,16 +251,8 @@ void vPortEndTask( void ) uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ #endif - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - - /* Allocate a Newlib reent structure that is specific to this task. - * Note Newlib support has been included by popular demand, but is not - * used by the FreeRTOS maintainers themselves. FreeRTOS is not - * responsible for resulting newlib operation. User must be familiar with - * newlib and must provide system-wide implementations of the necessary - * stubs. Be warned that (at the time of writing) the current newlib design - * implements a system-wide malloc() that must be provided with locks. */ - struct _reent xNewLib_reent; + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) + configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */ #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) diff --git a/portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h b/portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h index d41ad8534cf..49b15b09959 100644 --- a/portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h +++ b/portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h @@ -82,12 +82,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef unsigned int TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif #define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 ) diff --git a/portable/ThirdParty/GCC/ARC_v1/port.c b/portable/ThirdParty/GCC/ARC_v1/port.c index d3de6fa3d60..4dbdc7e601c 100644 --- a/portable/ThirdParty/GCC/ARC_v1/port.c +++ b/portable/ThirdParty/GCC/ARC_v1/port.c @@ -251,16 +251,8 @@ void vPortEndTask( void ) uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ #endif - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - - /* Allocate a Newlib reent structure that is specific to this task. 
- * Note Newlib support has been included by popular demand, but is not - * used by the FreeRTOS maintainers themselves. FreeRTOS is not - * responsible for resulting newlib operation. User must be familiar with - * newlib and must provide system-wide implementations of the necessary - * stubs. Be warned that (at the time of writing) the current newlib design - * implements a system-wide malloc() that must be provided with locks. */ - struct _reent xNewLib_reent; + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) + configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */ #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) diff --git a/portable/ThirdParty/GCC/ARC_v1/portmacro.h b/portable/ThirdParty/GCC/ARC_v1/portmacro.h index d8850e75897..72fbb49759e 100644 --- a/portable/ThirdParty/GCC/ARC_v1/portmacro.h +++ b/portable/ThirdParty/GCC/ARC_v1/portmacro.h @@ -81,12 +81,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef unsigned int TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif #define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 ) diff --git a/portable/ThirdParty/GCC/ATmega/portmacro.h b/portable/ThirdParty/GCC/ATmega/portmacro.h index 40e59ff2ba6..3e7714eb836 100644 --- a/portable/ThirdParty/GCC/ATmega/portmacro.h +++ b/portable/ThirdParty/GCC/ATmega/portmacro.h @@ -56,12 +56,14 @@ typedef uint8_t StackType_t; typedef int8_t BaseType_t; typedef uint8_t UBaseType_t; -#if configUSE_16_BIT_TICKS == 1 +#if configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index bd0842fbf55..d634c8b264e 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -73,7 +73,7 @@ typedef struct THREAD { pthread_t pthread; - pdTASK_CODE pxCode; + TaskFunction_t pxCode; void * pvParams; BaseType_t xDying; struct event * ev; @@ -115,6 +115,9 @@ static void prvPortYieldFromISR( void ); /*-----------------------------------------------------------*/ static void prvFatalError( const char * pcCall, + int iErrno ) __attribute__ ((__noreturn__)); + +void prvFatalError( const char * pcCall, int iErrno ) { fprintf( stderr, "%s: %s\n", pcCall, strerror( iErrno ) ); @@ -124,9 +127,9 @@ static void prvFatalError( const char * pcCall, /* * See header file for description. 
*/ -portSTACK_TYPE * pxPortInitialiseStack( portSTACK_TYPE * pxTopOfStack, - portSTACK_TYPE * pxEndOfStack, - pdTASK_CODE pxCode, +portSTACK_TYPE * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, void * pvParameters ) { Thread_t * thread; @@ -141,7 +144,7 @@ portSTACK_TYPE * pxPortInitialiseStack( portSTACK_TYPE * pxTopOfStack, */ thread = ( Thread_t * ) ( pxTopOfStack + 1 ) - 1; pxTopOfStack = ( portSTACK_TYPE * ) thread - 1; - ulStackSize = ( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack ); + ulStackSize = ( size_t )( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack ); thread->pxCode = pxCode; thread->pvParams = pvParameters; @@ -340,7 +343,7 @@ static uint64_t prvGetTimeNs( void ) clock_gettime( CLOCK_MONOTONIC, &t ); - return t.tv_sec * 1000000000ULL + t.tv_nsec; + return ( uint64_t )t.tv_sec * ( uint64_t )1000000000UL + ( uint64_t )t.tv_nsec; } static uint64_t prvStartTimeNs; diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index a5173871359..68655861202 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -68,10 +68,12 @@ typedef unsigned long TickType_t; /*-----------------------------------------------------------*/ /* Architecture specifics. */ +#define portNORETURN __attribute__( ( noreturn ) ) + #define portSTACK_GROWTH ( -1 ) #define portHAS_STACK_OVERFLOW_CHECKING ( 1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) -#define portTICK_RATE_MICROSECONDS ( ( portTickType ) 1000000 / configTICK_RATE_HZ ) +#define portTICK_RATE_MICROSECONDS ( ( TickType_t ) 1000000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index c2ea069cd08..6ad352a04ba 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -60,16 +60,18 @@ typedef int32_t BaseType_t; typedef uint32_t UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ @@ -78,6 +80,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /* We have to use PICO_DIVIDER_DISABLE_INTERRUPTS as the source of truth rathern than our config, * as our FreeRTOSConfig.h header cannot be included by ASM code - which is what this affects in the SDK */ #define portUSE_DIVIDER_SAVE_RESTORE !PICO_DIVIDER_DISABLE_INTERRUPTS @@ -177,8 +180,8 @@ #else extern void vTaskEnterCritical( void ); extern void vTaskExitCritical( void ); - extern UBaseType_t vTaskEnterCriticalFromISR( void ); - extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); + extern portBASE_TYPE vTaskEnterCriticalFromISR( void ); + extern void vTaskExitCriticalFromISR( portBASE_TYPE xSavedInterruptStatus ); #define portENTER_CRITICAL() vTaskEnterCritical() #define portEXIT_CRITICAL() vTaskExitCritical() #define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR() diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index fb3d571b519..73452e068a0 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -967,24 +967,24 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) static inline EventBits_t prvGetEventGroupBit( spin_lock_t * spinLock ) { uint32_t ulBit; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) ulBit = 1u << (spin_lock_get_num(spinLock) & 0x7u); - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) ulBit = 1u << spin_lock_get_num(spinLock); /* reduce to range 0-24 */ ulBit |= ulBit << 8u; ulBit >>= 8u; - #endif /* configUSE_16_BIT_TICKS */ + #endif /* configTICK_TYPE_WIDTH_IN_BITS */ return ( EventBits_t ) ulBit; } static inline EventBits_t prvGetAllEventGroupBits() { - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) return (EventBits_t) 0xffu; - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) return ( EventBits_t ) 0xffffffu; - #endif /* configUSE_16_BIT_TICKS */ + #endif /* configTICK_TYPE_WIDTH_IN_BITS */ } void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock, uint32_t ulSave ) diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h index d66ca3da5a0..575659ac597 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h @@ -119,12 +119,14 @@ typedef portBASE_TYPE BaseType_t; typedef unsigned portBASE_TYPE UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ @@ -333,6 +335,8 @@ /*-----------------------------------------------------------*/ /* Architecture specifics. 
*/ + #define portNORETURN __attribute__( ( noreturn ) ) + #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 4 diff --git a/portable/ThirdParty/XCC/Xtensa/portmacro.h b/portable/ThirdParty/XCC/Xtensa/portmacro.h index a0b87a007a6..c81576c9f78 100644 --- a/portable/ThirdParty/XCC/Xtensa/portmacro.h +++ b/portable/ThirdParty/XCC/Xtensa/portmacro.h @@ -70,12 +70,14 @@ typedef portSTACK_TYPE StackType_t; typedef portBASE_TYPE BaseType_t; typedef unsigned portBASE_TYPE UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/WizC/PIC18/portmacro.h b/portable/WizC/PIC18/portmacro.h index fea77758b77..ad70560e4ad 100644 --- a/portable/WizC/PIC18/portmacro.h +++ b/portable/WizC/PIC18/portmacro.h @@ -58,12 +58,14 @@ typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) ( 0xFFFF ) -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif #define portBYTE_ALIGNMENT 1 diff --git a/portable/oWatcom/16BitDOS/Flsh186/portmacro.h b/portable/oWatcom/16BitDOS/Flsh186/portmacro.h index 29e8270b571..a2b4c16a40b 100644 --- a/portable/oWatcom/16BitDOS/Flsh186/portmacro.h +++ b/portable/oWatcom/16BitDOS/Flsh186/portmacro.h @@ -58,12 +58,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/oWatcom/16BitDOS/PC/portmacro.h b/portable/oWatcom/16BitDOS/PC/portmacro.h index 6c8e022b410..ab4eea87210 100644 --- a/portable/oWatcom/16BitDOS/PC/portmacro.h +++ b/portable/oWatcom/16BitDOS/PC/portmacro.h @@ -57,12 +57,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
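All of the portmacro.h hunks above follow the same pattern: TickType_t is now selected by configTICK_TYPE_WIDTH_IN_BITS instead of configUSE_16_BIT_TICKS, and any other value fails the build via the new #error. A minimal migration sketch for an application's FreeRTOSConfig.h is below, assuming the TICK_TYPE_WIDTH_* constants are supplied by the kernel headers (their definitions are outside this excerpt):

/* FreeRTOSConfig.h - before this change. */
#define configUSE_16_BIT_TICKS           0

/* FreeRTOSConfig.h - after this change.  Choose exactly one supported width;
 * anything else now trips the #error added to each portmacro.h. */
#define configTICK_TYPE_WIDTH_IN_BITS    TICK_TYPE_WIDTH_32_BITS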
#endif /*-----------------------------------------------------------*/ diff --git a/queue.c b/queue.c index c6ec895c2a6..37ab1f24dff 100644 --- a/queue.c +++ b/queue.c @@ -64,14 +64,14 @@ typedef struct QueuePointers { - int8_t * pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ - int8_t * pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */ + int8_t * pcTail; /**< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ + int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */ } QueuePointers_t; typedef struct SemaphoreData { - TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */ - UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ + TaskHandle_t xMutexHolder; /**< The handle of the task that holds the mutex. */ + UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ } SemaphoreData_t; /* Semaphores do not actually store or copy data, so have an item size of @@ -99,27 +99,27 @@ typedef struct SemaphoreData */ typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - int8_t * pcHead; /*< Points to the beginning of the queue storage area. */ - int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */ + int8_t * pcHead; /**< Points to the beginning of the queue storage area. */ + int8_t * pcWriteTo; /**< Points to the free next place in the storage area. */ union { - QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */ - SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */ + QueuePointers_t xQueue; /**< Data required exclusively when this structure is used as a queue. */ + SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */ } u; - List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ - List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ + List_t xTasksWaitingToSend; /**< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ + List_t xTasksWaitingToReceive; /**< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ - volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */ - UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ - UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ + volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */ + UBaseType_t uxLength; /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */ + UBaseType_t uxItemSize; /**< The size of each items that the queue will hold. 
*/ - volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ - volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cRxLock; /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ #endif #if ( configUSE_QUEUE_SETS == 1 ) @@ -268,14 +268,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * tasks than the number of tasks in the system. */ #define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ - { \ + do { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \ { \ configASSERT( ( cTxLock ) != queueINT8_MAX ); \ ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \ } \ - } + } while( 0 ) /* * Macro to increment cRxLock member of the queue data structure. It is @@ -283,14 +283,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * tasks than the number of tasks in the system. */ #define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ - { \ + do { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \ { \ configASSERT( ( cRxLock ) != queueINT8_MAX ); \ ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ } \ - } + } while( 0 ) /*-----------------------------------------------------------*/ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, @@ -423,6 +423,55 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, #endif /* configSUPPORT_STATIC_ALLOCATION */ /*-----------------------------------------------------------*/ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue, + uint8_t ** ppucQueueStorage, + StaticQueue_t ** ppxStaticQueue ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( ppxStaticQueue ); + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Check if the queue was statically allocated. */ + if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE ) + { + if( ppucQueueStorage != NULL ) + { + *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead; + } + + *ppxStaticQueue = ( StaticQueue_t * ) pxQueue; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + #else /* configSUPPORT_DYNAMIC_ALLOCATION */ + { + /* Queue must have been statically allocated. 
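Wrapping prvIncrementQueueTxLock() and prvIncrementQueueRxLock() (and, further down, the multi-statement macros in stream_buffer.c and tasks.c) in do { } while( 0 ) makes each expand to a single statement that composes safely with if/else. A short sketch of the failure mode being avoided, using an invented macro name that mirrors the structure of the queue lock macros rather than their content:

#include "FreeRTOS.h"

#define exampleINCREMENT_TWICE( x ) \
    do {                            \
        ( x )++;                    \
        ( x )++;                    \
    } while( 0 )

void vExampleUsage( BaseType_t xCondition,
                    BaseType_t xValue )
{
    if( xCondition != pdFALSE )
        exampleINCREMENT_TWICE( xValue );   /* Expands to one statement, so the else still pairs up. */
    else
        xValue = 0;

    /* Had the macro body been a bare { } block, the semicolon after the macro
     * invocation would terminate the if statement, leaving the else with no if
     * to attach to and causing a compile error. */
    ( void ) xValue;
}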
*/ + if( ppucQueueStorage != NULL ) + { + *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead; + } + + *ppxStaticQueue = ( StaticQueue_t * ) pxQueue; + xReturn = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + return xReturn; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, @@ -1058,7 +1107,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const BaseType_t xCopyPosition ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1086,7 +1135,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { @@ -1211,7 +1260,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1221,7 +1270,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; /* Similar to xQueueGenericSendFromISR() but used with semaphores where the @@ -1257,7 +1306,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1377,7 +1426,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1916,7 +1965,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1938,7 +1987,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1998,7 +2047,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -2008,7 +2057,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; int8_t * pcOriginalReadPosition; Queue_t * const pxQueue = xQueue; @@ -2032,7 +2081,7 @@ 
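xQueueGenericGetStaticBuffers() above gives application code a way to recover the storage area and StaticQueue_t object it supplied to xQueueCreateStatic(), returning pdFALSE when the queue was dynamically allocated; a matching xStreamBufferGetStaticBuffers() appears in the stream_buffer.c hunks further down. A minimal usage sketch, calling the generic function directly (any convenience wrapper in queue.h is outside this excerpt) with illustrative names:

#include "FreeRTOS.h"
#include "queue.h"

#define exampleQUEUE_LENGTH    10

static StaticQueue_t xQueueBuffer;
static uint8_t ucQueueStorage[ exampleQUEUE_LENGTH * sizeof( uint32_t ) ];

void vExampleRetrieveQueueBuffers( void )
{
    QueueHandle_t xQueue;
    uint8_t * pucStorage = NULL;
    StaticQueue_t * pxStatic = NULL;

    xQueue = xQueueCreateStatic( exampleQUEUE_LENGTH, sizeof( uint32_t ), ucQueueStorage, &xQueueBuffer );

    /* pdTRUE is returned only when the queue's memory was provided by the application. */
    if( xQueueGenericGetStaticBuffers( xQueue, &pucStorage, &pxStatic ) == pdTRUE )
    {
        configASSERT( pucStorage == ucQueueStorage );
        configASSERT( pxStatic == &xQueueBuffer );
    }
}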
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { /* Cannot block in an ISR, so check there is data available. */ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2053,7 +2102,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xReturn; } diff --git a/stream_buffer.c b/stream_buffer.c index 4c80bfd742e..7a20b61e44a 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -70,7 +70,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - ( void ) xTaskResumeAll(); + ( void ) xTaskResumeAll() #endif /* sbRECEIVE_COMPLETED */ /* If user has provided a per-instance receive complete callback, then @@ -95,10 +95,10 @@ #ifndef sbRECEIVE_COMPLETED_FROM_ISR #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ - { \ - UBaseType_t uxSavedInterruptStatus; \ + do { \ + portBASE_TYPE xSavedInterruptStatus; \ \ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); \ + xSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -109,8 +109,8 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ - } + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); \ + } while( 0 ) #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) @@ -147,7 +147,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - ( void ) xTaskResumeAll(); + ( void ) xTaskResumeAll() #endif /* sbSEND_COMPLETED */ /* If user has provided a per-instance send completed callback, then @@ -155,7 +155,7 @@ */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvSEND_COMPLETED( pxStreamBuffer ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \ { \ pxStreamBuffer->pxSendCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \ @@ -164,7 +164,7 @@ { \ sbSEND_COMPLETED( ( pxStreamBuffer ) ); \ } \ - } + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETED( pxStreamBuffer ) sbSEND_COMPLETED( ( pxStreamBuffer ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ @@ -172,10 +172,10 @@ #ifndef sbSEND_COMPLETE_FROM_ISR #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - { \ - UBaseType_t uxSavedInterruptStatus; \ + do { \ + portBASE_TYPE xSavedInterruptStatus; \ \ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); \ + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -186,14 +186,14 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ - } + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); \ + } while( 0 ) #endif /* sbSEND_COMPLETE_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \ { \ ( pxStreamBuffer )->pxSendCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \ 
@@ -202,7 +202,7 @@ { \ sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \ } \ - } + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ) @@ -436,13 +436,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); #if ( configASSERT_DEFINED == 1 ) - { + /* Sanity check that the size of the structure used to declare a * variable of type StaticStreamBuffer_t equals the size of the real * message buffer structure. */ - volatile size_t xSize = sizeof( StaticStreamBuffer_t ); - configASSERT( xSize == sizeof( StreamBuffer_t ) ); - } /*lint !e529 xSize is referenced is configASSERT() is defined. */ + configASSERT( sizeof( StaticStreamBuffer_t ) == sizeof( StreamBuffer_t ) ); #endif /* configASSERT_DEFINED */ if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) @@ -474,6 +472,34 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, #endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ /*-----------------------------------------------------------*/ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer, + uint8_t ** ppucStreamBufferStorageArea, + StaticStreamBuffer_t ** ppxStaticStreamBuffer ) + { + BaseType_t xReturn; + const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; + + configASSERT( pxStreamBuffer ); + configASSERT( ppucStreamBufferStorageArea ); + configASSERT( ppxStaticStreamBuffer ); + + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) != ( uint8_t ) 0 ) + { + *ppucStreamBufferStorageArea = pxStreamBuffer->pucBuffer; + *ppxStaticStreamBuffer = ( StaticStreamBuffer_t * ) pxStreamBuffer; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; + } +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) { StreamBuffer_t * pxStreamBuffer = xStreamBuffer; @@ -1188,11 +1214,11 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { @@ -1208,7 +1234,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer xReturn = pdFALSE; } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1219,11 +1245,11 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { @@ -1239,7 +1265,7 @@ BaseType_t 
xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf xReturn = pdFALSE; } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1372,9 +1398,9 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, /* The value written just has to be identifiable when looking at the * memory. Don't use 0xA5 as that is the stack fill value and could * result in confusion as to what is actually being observed. */ - const BaseType_t xWriteValue = 0x55; - configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer ); - } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ + #define STREAM_BUFFER_BUFFER_WRITE_VALUE ( 0x55 ) + configASSERT( memset( pucBuffer, ( int ) STREAM_BUFFER_BUFFER_WRITE_VALUE, xBufferSizeBytes ) == pucBuffer ); + } #endif ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ diff --git a/tasks.c b/tasks.c index 5e862d8b131..28193e20f9d 100644 --- a/tasks.c +++ b/tasks.c @@ -138,7 +138,7 @@ /*-----------------------------------------------------------*/ #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ + do { \ UBaseType_t uxTopPriority = uxTopReadyPriority; \ \ /* Find the highest priority queue that contains ready tasks. */ \ @@ -152,7 +152,7 @@ * the same priority get an equal share of the processor time. */ \ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ uxTopReadyPriority = uxTopPriority; \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */ /*-----------------------------------------------------------*/ @@ -178,14 +178,14 @@ /*-----------------------------------------------------------*/ #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ + do { \ UBaseType_t uxTopPriority; \ \ /* Find the highest priority list that contains ready tasks. */ \ portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ + } while( 0 ) /*-----------------------------------------------------------*/ @@ -193,12 +193,12 @@ * is being referenced from a ready list. If it is referenced from a delayed * or suspended list then it won't be in a ready list. */ #define taskRESET_READY_PRIORITY( uxPriority ) \ - { \ + do { \ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \ { \ portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \ } \ - } + } while( 0 ) #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ @@ -207,7 +207,7 @@ /* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick * count overflows. */ #define taskSWITCH_DELAYED_LISTS() \ - { \ + do { \ List_t * pxTemp; \ \ /* The delayed tasks list should be empty when the lists are switched. */ \ @@ -218,7 +218,7 @@ pxOverflowDelayedTaskList = pxTemp; \ xNumOfOverflows++; \ prvResetNextTaskUnblockTime(); \ - } + } while( 0 ) /*-----------------------------------------------------------*/ @@ -226,11 +226,13 @@ * Place the task represented by pxTCB into the appropriate ready list for * the task. It is inserted at the end of the list. 
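taskSELECT_HIGHEST_PRIORITY_TASK(), now wrapped in do { } while( 0 ) like the other kernel macros, relies on the port supplying portRECORD_READY_PRIORITY() and portGET_HIGHEST_PRIORITY() when configUSE_PORT_OPTIMISED_TASK_SELECTION is enabled. As a rough sketch of the usual bitmap-plus-count-leading-zeros scheme (typical of 32-bit ports, but an illustration that is not taken from this patch):

/* Each ready priority sets one bit in a 32-bit bitmap... */
#define examplePORT_RECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
    ( ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) )

#define examplePORT_RESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
    ( ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) )

/* ...so the highest ready priority is found in constant time with a
 * count-leading-zeros instruction (GCC builtin used here for illustration,
 * valid only while at least one bit is set - which the kernel asserts). */
#define examplePORT_GET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) \
    ( ( uxTopPriority ) = ( 31UL - ( uint32_t ) __builtin_clz( uxReadyPriorities ) ) )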
*/ -#define prvAddTaskToReadyList( pxTCB ) \ - traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ - taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ - listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ - tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) +#define prvAddTaskToReadyList( pxTCB ) \ + do { \ + traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ + taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ + listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ + tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ); \ + } while( 0 ) /*-----------------------------------------------------------*/ /* @@ -249,10 +251,12 @@ * the scheduler that the value should not be changed - in which case it is the * responsibility of whichever module is using the value to ensure it gets set back * to its original value when it is released. */ -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS ) + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000000000000000ULL #endif /* Task state. */ @@ -300,45 +304,45 @@ typedef BaseType_t TaskRunning_t; */ typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - volatile StackType_t * pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ + volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ #if ( portUSING_MPU_WRAPPERS == 1 ) - xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ + xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ #endif #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have greater than or equal to the number of bits as confNUM_CORES. */ #endif - ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ - ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ - UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ - StackType_t * pxStack; /*< Points to the start of the stack. */ + ListItem_t xStateListItem; /**< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ + ListItem_t xEventListItem; /**< Used to reference a task from an event list. */ + UBaseType_t uxPriority; /**< The priority of the task. 0 is the lowest priority. */ + StackType_t * pxStack; /**< Points to the start of the stack. */ #if ( configNUMBER_OF_CORES > 1 ) - volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if the task is running. Otherwise, identifies the task's state - not running or yielding. */ - UBaseType_t uxTaskAttributes; /*< Task's attributes - currently used to identify the idle tasks. 
*/ + volatile TaskRunning_t xTaskRunState; /**< Used to identify the core the task is running on, if the task is running. Otherwise, identifies the task's state - not running or yielding. */ + UBaseType_t uxTaskAttributes; /**< Task's attributes - currently used to identify the idle tasks. */ #endif - char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - BaseType_t xPreemptionDisable; /*< Used to prevent the task from being preempted. */ + BaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */ #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) - StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */ + StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */ #endif #if ( portCRITICAL_NESTING_IN_TCB == 1 ) - UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ + UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ #endif #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ - UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */ + UBaseType_t uxTCBNumber; /**< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ + UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */ #endif #if ( configUSE_MUTEXES == 1 ) - UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */ + UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */ UBaseType_t uxMutexesHeld; #endif @@ -351,11 +355,11 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #endif #if ( configGENERATE_RUN_TIME_STATS == 1 ) - configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ + configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */ #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) - configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */ + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) + configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */ #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) @@ -366,7 +370,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to /* See the comments in FreeRTOS.h with the definition of * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. 
*/ #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ #endif #if ( INCLUDE_xTaskAbortDelay == 1 ) @@ -395,23 +399,23 @@ portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but * doing so breaks some kernel aware debuggers and debuggers that rely on removing * the static qualifier. */ -PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */ -PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */ -PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ -PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */ -PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ -PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ +PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList1; /**< Delayed tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList2; /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /**< Points to the delayed task list currently being used. */ +PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t xPendingReadyList; /**< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ #if ( INCLUDE_vTaskDelete == 1 ) - PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */ + PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */ PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U; #endif #if ( INCLUDE_vTaskSuspend == 1 ) - PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */ + PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. */ #endif @@ -431,7 +435,7 @@ PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. 
*/ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ] = { NULL }; /*< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ] = { NULL }; /**< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -456,8 +460,8 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ - PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUM_CORES ] = { 0UL }; /*< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUM_CORES ] = { 0UL }; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUM_CORES ] = { 0UL }; /**< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUM_CORES ] = { 0UL }; /**< Holds the total amount of execution time as defined by the run time counter clock. */ #endif @@ -539,9 +543,9 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * void prvMinimalIdleTask( void *pvParameters ); * */ -static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; #if ( configNUMBER_OF_CORES > 1 ) - static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; + static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; #endif /* @@ -1584,10 +1588,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Allocate and initialize memory for the task's TLS Block. */ - configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock ); + configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock, pxTopOfStack ); } #endif @@ -2281,7 +2285,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) { TCB_t const * pxTCB; - UBaseType_t uxReturn, uxSavedInterruptState; + UBaseType_t uxReturn; + portBASE_TYPE xSavedInterruptState; /* RTOS ports that support interrupt nesting have the concept of a * maximum system call (or maximum API call) interrupt priority. @@ -2301,14 +2306,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptState = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptState = taskENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. 
*/ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptState ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptState ); return uxReturn; } @@ -2372,7 +2377,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* The priority of a task other than the currently * running task is being raised. Is the priority being * raised above that of the running task? */ - if( uxNewPriority >= pxCurrentTCB->uxPriority ) + if( uxNewPriority > pxCurrentTCB->uxPriority ) { xYieldRequired = pdTRUE; } @@ -2909,7 +2914,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #if ( configNUMBER_OF_CORES == 1 ) { /* A higher priority task may have just been resumed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { /* This yield may not cause the task just resumed to run, * but will leave the lists in the correct state for the @@ -2954,7 +2959,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { BaseType_t xYieldRequired = pdFALSE; TCB_t * const pxTCB = xTaskToResume; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToResume ); @@ -2976,7 +2981,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -2989,7 +2994,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Ready lists can be accessed so move the task from the * suspended list to the ready list directly. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { xYieldRequired = pdTRUE; @@ -3032,7 +3037,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xYieldRequired; } @@ -3252,7 +3257,7 @@ void vTaskStartScheduler( void ) * starts to run. */ portDISABLE_INTERRUPTS(); - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Switch C-Runtime's TLS Block to point to the TLS * block specific to the task that will run first. */ @@ -3498,7 +3503,7 @@ BaseType_t xTaskResumeAll( void ) { /* If the moved task has a priority higher than the current * task then a yield must be performed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { xYieldPendings[ xCoreID ] = pdTRUE; } @@ -3615,7 +3620,7 @@ TickType_t xTaskGetTickCount( void ) TickType_t xTaskGetTickCountFromISR( void ) { TickType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; /* RTOS ports that support interrupt nesting have the concept of a maximum * system call (or maximum API call) interrupt priority. 
Interrupts that are @@ -3633,11 +3638,11 @@ TickType_t xTaskGetTickCountFromISR( void ) * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); { xReturn = xTickCount; } - portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -3864,6 +3869,53 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char #endif /* INCLUDE_xTaskGetHandle */ /*-----------------------------------------------------------*/ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask, + StackType_t ** ppuxStackBuffer, + StaticTask_t ** ppxTaskBuffer ) + { + BaseType_t xReturn; + TCB_t * pxTCB; + + configASSERT( ppuxStackBuffer != NULL ); + configASSERT( ppxTaskBuffer != NULL ); + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 ) + { + if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB ) + { + *ppuxStackBuffer = pxTCB->pxStack; + *ppxTaskBuffer = ( StaticTask_t * ) pxTCB; + xReturn = pdTRUE; + } + else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY ) + { + *ppuxStackBuffer = pxTCB->pxStack; + *ppxTaskBuffer = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */ + { + *ppuxStackBuffer = pxTCB->pxStack; + *ppxTaskBuffer = ( StaticTask_t * ) pxTCB; + xReturn = pdTRUE; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */ + + return xReturn; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, @@ -3913,7 +3965,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) ); #else - *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); #endif } } @@ -4417,18 +4469,18 @@ BaseType_t xTaskIncrementTick( void ) { TCB_t * pxTCB; TaskHookFunction_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; /* If xTask is NULL then set the calling task's hook. */ pxTCB = prvGetTCBFromHandle( xTask ); /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. 
*/ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -4488,7 +4540,7 @@ BaseType_t xTaskIncrementTick( void ) #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); #endif /* Add the amount of time the task has been running to the @@ -4533,7 +4585,7 @@ BaseType_t xTaskIncrementTick( void ) } #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Switch C-Runtime's TLS Block to point to the TLS * Block specific to this task. */ @@ -4621,7 +4673,7 @@ BaseType_t xTaskIncrementTick( void ) } #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Switch C-Runtime's TLS Block to point to the TLS * Block specific to this task. */ @@ -5093,7 +5145,8 @@ void vTaskMissedYield( void ) * void prvIdleTask( void *pvParameters ); * */ -static portTASK_FUNCTION( prvIdleTask, pvParameters ) + +portTASK_FUNCTION( prvIdleTask, pvParameters ) { /* Stop warnings. */ ( void ) pvParameters; @@ -5155,13 +5208,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) #if ( configUSE_IDLE_HOOK == 1 ) { - extern void vApplicationIdleHook( void ); - - /* Call the user defined function from within the idle task. This - * allows the application designer to add background functionality - * without the overhead of a separate task. - * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, - * CALL A FUNCTION THAT MIGHT BLOCK. */ + /* Call the user defined function from within the idle task. */ vApplicationIdleHook(); } #endif /* configUSE_IDLE_HOOK */ @@ -5692,7 +5739,7 @@ static void prvCheckTasksWaitingTermination( void ) * want to allocate and clean RAM statically. */ portCLEAN_UP_TCB( pxTCB ); - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Free up the memory allocated for the task's TLS Block. 
*/ configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock ); @@ -5777,13 +5824,13 @@ static void prvResetNextTaskUnblockTime( void ) TaskHandle_t xTaskGetCurrentTaskHandle( void ) { TaskHandle_t xReturn; - UBaseType_t uxSavedInterruptStatus = 0; + portBASE_TYPE xSavedInterruptStatus; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK(); { xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; } - portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK( xSavedInterruptStatus ); return xReturn; } @@ -6245,13 +6292,13 @@ static void prvResetNextTaskUnblockTime( void ) #if ( configNUMBER_OF_CORES > 1 ) - UBaseType_t vTaskEnterCriticalFromISR( void ) + portBASE_TYPE vTaskEnterCriticalFromISR( void ) { - UBaseType_t uxSavedInterruptStatus = 0; + portBASE_TYPE xSavedInterruptStatus = 0; if( xSchedulerRunning != pdFALSE ) { - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { @@ -6265,7 +6312,7 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } - return uxSavedInterruptStatus; + return xSavedInterruptStatus; } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ @@ -6371,7 +6418,7 @@ static void prvResetNextTaskUnblockTime( void ) #if ( configNUMBER_OF_CORES > 1 ) - void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) + void vTaskExitCriticalFromISR( portBASE_TYPE xSavedInterruptStatus ) { if( xSchedulerRunning != pdFALSE ) { @@ -6386,7 +6433,7 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { portRELEASE_ISR_LOCK(); - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); } else { @@ -7013,7 +7060,7 @@ TickType_t uxTaskResetEventItemValue( void ) TCB_t * pxTCB; uint8_t ucOriginalNotifyState; BaseType_t xReturn = pdPASS; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -7038,7 +7085,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( pulPreviousNotificationValue != NULL ) { @@ -7152,7 +7199,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -7168,7 +7215,7 @@ TickType_t uxTaskResetEventItemValue( void ) { TCB_t * pxTCB; uint8_t ucOriginalNotifyState; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -7193,7 +7240,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -7263,7 +7310,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); } #endif /* configUSE_TASK_NOTIFICATIONS */ 
@@ -7331,9 +7378,9 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* configUSE_TASK_NOTIFICATIONS */ /*-----------------------------------------------------------*/ -#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) +#if ( configGENERATE_RUN_TIME_STATS == 1 ) - configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void ) + configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) { configRUN_TIME_COUNTER_TYPE ulReturn = 0; @@ -7348,14 +7395,14 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) +#if ( configGENERATE_RUN_TIME_STATS == 1 ) - configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ) + configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask ) { configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0; - ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES; + ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) ( portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES ); /* For percentage calculations. */ ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; @@ -7378,7 +7425,27 @@ TickType_t uxTaskResetEventItemValue( void ) return ulReturn; } -#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void ) + { + return ulTaskGetRunTimeCounter( xIdleTaskHandle ); + } + +#endif +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ) + { + return ulTaskGetRunTimePercent( xIdleTaskHandle ); + } + +#endif /*-----------------------------------------------------------*/ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, diff --git a/timers.c b/timers.c index 68e53c0ca44..cff986ae7ff 100644 --- a/timers.c +++ b/timers.c @@ -74,15 +74,15 @@ /* The definition of the timers themselves. */ typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - const char * pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. */ - TickType_t xTimerPeriodInTicks; /*<< How quickly and often the timer expires. */ - void * pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ - portTIMER_CALLBACK_ATTRIBUTE TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + const char * pcTimerName; /**< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + ListItem_t xTimerListItem; /**< Standard linked list item as used by all kernel features for event management. */ + TickType_t xTimerPeriodInTicks; /**< How quickly and often the timer expires. */ + void * pvTimerID; /**< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ + portTIMER_CALLBACK_ATTRIBUTE TimerCallbackFunction_t pxCallbackFunction; /**< The function that will be called when the timer expires. */ #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ + UBaseType_t uxTimerNumber; /**< An ID assigned by trace tools such as FreeRTOS+Trace */ #endif - uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ + uint8_t ucStatus; /**< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ } xTIMER; /* The old xTIMER name is maintained above then typedefed to the new Timer_t @@ -96,8 +96,8 @@ * and xCallbackParametersType respectively. */ typedef struct tmrTimerParameters { - TickType_t xMessageValue; /*<< An optional value used by a subset of commands, for example, when changing the period of a timer. */ - Timer_t * pxTimer; /*<< The timer to which the command will be applied. */ + TickType_t xMessageValue; /**< An optional value used by a subset of commands, for example, when changing the period of a timer. */ + Timer_t * pxTimer; /**< The timer to which the command will be applied. */ } TimerParameter_t; @@ -113,7 +113,7 @@ * that is used to determine which message type is valid. */ typedef struct tmrTimerQueueMessage { - BaseType_t xMessageID; /*<< The command being sent to the timer service task. */ + BaseType_t xMessageID; /**< The command being sent to the timer service task. */ union { TimerParameter_t xTimerParameters; @@ -159,7 +159,7 @@ * task. Other tasks communicate with the timer service task using the * xTimerQueue queue. */ - static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) PRIVILEGED_FUNCTION; + static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; /* * Called by the timer service task to interpret and process a command it @@ -551,6 +551,30 @@ } /*-----------------------------------------------------------*/ + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer, + StaticTimer_t ** ppxTimerBuffer ) + { + BaseType_t xReturn; + Timer_t * pxTimer = xTimer; + + configASSERT( ppxTimerBuffer != NULL ); + + if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) != 0 ) + { + *ppxTimerBuffer = ( StaticTimer_t * ) pxTimer; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + const char * pcTimerGetName( TimerHandle_t xTimer ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ { Timer_t * pxTimer = xTimer; @@ -616,8 +640,6 @@ #if ( configUSE_DAEMON_TASK_STARTUP_HOOK == 1 ) { - extern void vApplicationDaemonTaskStartupHook( void ); - /* Allow the application writer to execute some code in the context of * this task at the point the task starts executing. 
This is useful if the * application includes initialisation code that would benefit from From 14cec43e3f99903726543a5f7017feb63e723763 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 18 Apr 2023 22:15:35 +0800 Subject: [PATCH 150/164] Smp dev merge main 20230410 (#74) * Fix array-bounds compiler warning on gcc11+ in list.h (#580) listGET_OWNER_OF_NEXT_ENTRY computes `( pxConstList )->pxIndex->pxNext` after verifying that `( pxConstList )->pxIndex` points to `xListEnd`, which due to being a MiniListItem_t, can be shorter than a ListItem_t. Thus, `( pxConstList )->pxIndex` is a `ListItem_t *` that extends past the end of the `List_t` whose `xListEnd` it points to. This is fixed by accessing `pxNext` through a `MiniListItem_t` instead. * move the prototype for vApplicationIdleHook to task.h. (#600) Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update equal priority task preemption (#603) * vTaskResume and vTaskPrioritySet don't preempt equal priority task * Update vTaskResumeAll not to preempt task with equal priority * Fix in xTaskResumeFromISR * Update FreeRTOS/FreeRTOS build checks (#613) This is needed to be compatible with the refactoring done in this PR - https://github.com/FreeRTOS/FreeRTOS/pull/889 Signed-off-by: Gaurav Aggarwal Signed-off-by: Gaurav Aggarwal * Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent (#611) Allow ulTaskGetIdleRunTimeCounter and ulTaskGetIdleRunTimePercent to be used whenever configGENERATE_RUN_TIME_STATS is enabled, as this is the only requirement for these functions to work. * Fix some CMake documentation typos (#616) The quick start instructions for CMake mention the "master" git branch which has been replaced by "main" in the current repo. The main CMakeLists.txt documents how to integrate a custom port. Fix a typo in the suggested CMake code. * Added support of 64bit events. (#597) * Added support of 64bit even Signed-off-by: Cervenka Dusan * Added missing brackets Signed-off-by: Cervenka Dusan * Made proper name for tick macro. Signed-off-by: Cervenka Dusan * Improved macro evaluation Signed-off-by: Cervenka Dusan * Fixed missed port files + documentation Signed-off-by: Cervenka Dusan * Changes made on PR Signed-off-by: Cervenka Dusan * Fix macro definition. Signed-off-by: Cervenka Dusan * Formatted code with uncrustify Signed-off-by: Cervenka Dusan --------- Signed-off-by: Cervenka Dusan * Introduce portMEMORY_BARRIER for Microblaze port. (#621) The introduction of `portMEMORY_BARRIER` will ensure the places in the kernel use a barrier will work. For example, `xTaskResumeAll` has a memory barrier to ensure its correctness when compiled with optimization enabled. Without the barrier `xTaskResumeAll` can fail (e.g. start reading and writing to address 0 and/or infinite looping) when `xPendingReadyList` contains more than one task to restore. In `xTaskResumeAll` the compiler chooses to cache the `pxTCB` the first time through the loop for use in every subsequent loop. This is incorrect as the removal of `pxTCB->xEventListItem` will actually change the value of `pxTCB` if it was read again at the top of the loop. The barrier forces the compiler to read `pxTCB` again at the top of the loop. The compiler is operating correctly. The removal `pxTCB->xEventListItem` executes on a `List_t *` and `ListItem_t *`. This means that the compiler can assume that any `MiniListItem_t` values are unchanged by the loop (i.e. "strict-aliasing"). 
This allows the compiler to cache `pxTCB` as it is obtained via a `MiniListItem_t`. This is incorrect in this case because it is possible for a `ListItem_t *` to actually alias a `MiniListItem_t`. This is technically a "violation of aliasing rules" so we use the the barrier to disable the strict-aliasing optimization in this loop. * Do not call exit() on MSVC Port when calling vPortEndScheduler (#624) * make port exitable * correctly set xPortRunning to False * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update PR template to include checkbox for Unit Test related changes (#627) * Fix build failure introduced in PR #597 (#629) The PR #597 introduced a new config option configTICK_TYPE_WIDTH_IN_BITS which can be defined to one of the following: * TICK_TYPE_WIDTH_16_BITS - Tick type is 16 bit wide. * TICK_TYPE_WIDTH_32_BITS - Tick type is 32 bit wide. * TICK_TYPE_WIDTH_64_BITS - Tick type is 64 bit wide. Earlier we supported 16 and 32 bit width for tick type which was controlled using the config option configUSE_16_BIT_TICKS. The PR tried to maintain backward compatibility by honoring configUSE_16_BIT_TICKS. The backward compatibility did not work as expected though, as the macro configTICK_TYPE_WIDTH_IN_BITS was used before it was defined. This PR addresses it by ensuring that the macro configTICK_TYPE_WIDTH_IN_BITS is defined before it is used. Testing 1. configUSE_16_BIT_TICKS is defined to 0. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 2. configUSE_16_BIT_TICKS is defined to 1. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 3. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_16_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 4. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_32_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 5. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_64_BITS. ``` #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
``` The testing was done for GCC/ARM_CM3 port which does not support 64 bit tick type. 6. Neither configUSE_16_BIT_TICKS nor configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` 7. Both configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` Related issue - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/628 Signed-off-by: Gaurav Aggarwal * Feature/fixing clang gnu compiler warnings (#620) * Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558) * Using single name definition for libraries everywhere. (#558) * Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571) * Removing compiler warnings for GNU and Clang. (#571) * Added in documentation on how to consume from a main project. Added default PORT selection for native POSIX and MINGW platforms. * Only adding freertos_config if it exists. Removing auto generation of it from a FREERTOS_CONFIG_FILE_DIRECTORY. * Fixing clang and gnu compiler warnings. * Adding in project information and how to compile for GNU/clang * Fixing compiler issue with unused variable - no need to declare variable. * Adding in compile warnings for linux builds that kernel is okay with using. * Fixing more extra-semi-stmt clang warnings. * Moving definition of hooks into header files if features are enabled. * Fixing formatting with uncrustify. * Fixing merge conflicts with main merge. * Fixing compiler errors due to merge issues and formatting. * Fixing Line feeds. * Adding 'portNORETURN' into portmacros.h. Other Updates based on PR request * Further clean-up of clang and clang-tidy issues. * Removing compiler specific pragmas from common c files. * Fixing missing lexicon entry and uncrustify formatting changes. * Resolving merge issue multiple defnitions of proto for prvIdleTask * Fixing formatting issues that are not covered by uncrustify. Use clang-tidy instead if you want this level of control. * More uncrustify formatting issues. * Fixing extra bracket in #if statement. --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * POSIX port fixes (#626) * Fix types in POSIX port Use TaskFunction_t and StackType_t as other ports do. * Fix portTICK_RATE_MICROSECONDS in POSIX port --------- Co-authored-by: Jacques GUILLOU Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Cortex-M35P: Add Cortex-M35P port (#631) * Cortex-M35P: Add Cortex-M35P port The Cortex-M35P support added to kernel. The port hasn't been validated yet with TF-M. Hence TF-M support is not included in this port. 
Signed-off-by: Devaraj Ranganna * Add portNORETURN to the newly added portmacro.h Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Introduced Github Status Badge for Unit Tests (#634) * Introduced Github Status Badge for Unit Tests * Github status badge to point to latest run * Github status badge UT points to latest results * Fixed URL for Github Status badge --------- Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Remove C99 requirement from CMake file (#633) * Remove C99 requirement from CMake file The kernel source is C89 compliant and does not need C99. Signed-off-by: Gaurav Aggarwal * Explicitly set C89 requirement for kernel Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add Thread Local Storage (TLS) support using Picolibc functions (#343) * Pass top of stack to configINIT_TLS_BLOCK Picolibc wants to allocate the per-task TLS block within the stack segment, so it will need to modify the top of stack value. Pass the pxTopOfStack variable to make this explicit. Signed-off-by: Keith Packard * Move newlib-specific definitions to separate file This reduces the clutter in FreeRTOS.h caused by having newlib-specific macros present there. Signed-off-by: Keith Packard * Make TLS code depend only on configUSE_C_RUNTIME_TLS_SUPPORT Remove reference to configUSE_NEWLIB_REENTRANT as that only works when using newlib. configUSE_C_RUNTIME_TLS_SUPPORT is always set when configUSE_NEWLIB_REENTRANT is set, so using both was redundant in that case. Signed-off-by: Keith Packard * portable-ARC: Adapt ARC support to use generalized TLS support With generalized thread local storage (TLS) support present in the core, the two ARC ports need to have the changes to the TCB mirrored to them. Signed-off-by: Keith Packard * Add Thread Local Storage (TLS) support using Picolibc functions This patch provides definitions of the general TLS support macros in terms of the Picolibc TLS support functions. Picolibc is normally configured to use TLS internally for all variables that are intended to be task-local, so these changes are necessary for picolibc to work correctly with FreeRTOS. The picolibc helper functions rely on elements within the linker script to arrange the TLS data in memory and define some symbols. Applications wanting to use this mechanism will need changes in their linker script when migrating to picolibc. Signed-off-by: Keith Packard --------- Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Interrupt priority assert improvements for CM3/4/7 (#602) * Interrupt priority assert improvements for CM3/4/7 In the ARM_CM3, ARM_CM4, and ARM_CM7 ports, change the assertion that `configMAX_SYSCALL_INTERRUPT_PRIORITY` is nonzero to account for the number of priority bits implemented by the hardware. Change these ports to also use the lowest priority for PendSV and SysTick, ignoring `configKERNEL_INTERRUPT_PRIORITY`. 
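As a rough illustration of the check described above (a sketch only, not the port's actual code - the register address, the example configMAX_SYSCALL_INTERRUPT_PRIORITY value and the use of assert() are assumptions made for the example):

```c
/* Sketch: probe how many priority bits the NVIC implements, then verify that
 * configMAX_SYSCALL_INTERRUPT_PRIORITY does not collapse to zero once the
 * unimplemented low-order bits are masked off. */
#include <assert.h>
#include <stdint.h>

#define configMAX_SYSCALL_INTERRUPT_PRIORITY    0x20U                                      /* Example value only. */
#define portFIRST_USER_PRIORITY_REGISTER        ( *( ( volatile uint8_t * ) 0xE000E400 ) )  /* Assumed NVIC IPR address. */

static void prvCheckSyscallPriorityIsValid( void )
{
    uint8_t ucOriginalPriority = portFIRST_USER_PRIORITY_REGISTER;
    uint8_t ucMaxPriorityValue;
    uint8_t ucImplementedBits = 0U;

    /* Write all ones and read back - only the implemented priority bits stick. */
    portFIRST_USER_PRIORITY_REGISTER = 0xFFU;
    ucMaxPriorityValue = portFIRST_USER_PRIORITY_REGISTER;
    portFIRST_USER_PRIORITY_REGISTER = ucOriginalPriority;

    /* Count the implemented (most significant) priority bits. */
    while( ( ucMaxPriorityValue & 0x80U ) != 0U )
    {
        ucImplementedBits++;
        ucMaxPriorityValue = ( uint8_t ) ( ucMaxPriorityValue << 1 );
    }

    /* If the configured value used only unimplemented bits it would read back
     * as priority 0 and mask every interrupt - assert that cannot happen. */
    assert( ( configMAX_SYSCALL_INTERRUPT_PRIORITY &
              ( uint8_t ) ( 0xFFU << ( 8U - ucImplementedBits ) ) ) != 0U );
}
```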
* Remove not needed configKERNEL_INTERRUPT_PRIORITY define Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Introduced code coverage status badge (#635) * Introduced code coverage status badge * Trying to fix the URL checker issue * Fix URL check Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * added portPOINTER_SIZE_TYPE and SIZE_MAX definition to PIC24/dsPIC port (#636) * added portPOINTER_SIZE_TYPE definition to PIC24/dsPIC port * Added SIZE_MAX definition to PIC24/dsPIC33 * Fix TLS and stack alignment when using picolibc (#637) Both the TLS block and stack must be correctly aligned when using picolibc. The architecture stack alignment is represented by the portBYTE_ALIGNMENT_MASK and the TLS block alignment is provided by the Picolibc _tls_align() inline function for Picolibc version 1.8 and above. For older versions of Picolibc, we'll assume that the TLS block requires the same alignment as the stack. For downward growing stacks, this requires aligning the start of the TLS block to the maximum of the stack alignment and the TLS alignment. With this, both the TLS block and stack will now be correctly aligned. For upward growing stacks, the two areas must be aligned independently; the TLS block is aligned from the start of the stack, then the tls space is allocated, and then the stack is aligned above that. It's probably useful to know here that the linker ensures that variables within the TLS block are assigned offsets that match their alignment requirements. If the TLS block itself is correctly aligned, then everything within will also be. I have only tested the downward growing stack branch of this patch. Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Enable building the GCC Cortex-R5 port without an FPU (#586) * Ensure configUSE_TASK_FPU_SUPPORT option is set correctly If one does enable the FPU of the Cortex-R5 processor, then the GCC compiler will define the macro __ARM_FP. This can be used to ensure, that the configUSE_TASK_FPU_SUPPORT is set accordingly. 
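A compile-time sketch of that idea (illustrative only - the exact guards in the port's header may differ):

```c
/* Sketch: derive a consistent configUSE_TASK_FPU_SUPPORT default from the
 * compiler-provided __ARM_FP macro. Illustrative only. */
#ifdef __ARM_FP
    /* The compiler is generating FPU instructions, so tasks may need a
     * per-task FPU context. */
    #ifndef configUSE_TASK_FPU_SUPPORT
        #define configUSE_TASK_FPU_SUPPORT    1
    #endif
#else
    #ifndef configUSE_TASK_FPU_SUPPORT
        #define configUSE_TASK_FPU_SUPPORT    0
    #endif
    #if ( configUSE_TASK_FPU_SUPPORT == 1 )
        #error configUSE_TASK_FPU_SUPPORT is 1 but the compiler is not generating FPU code because __ARM_FP is not defined.
    #endif
#endif
```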
* Enable the implementation of vPortTaskUsesFPU only if configUSE_TASK_FPU_SUPPORT is set to 1 * Remove error case in pxPortInitialiseStack The case of configUSE_TASK_FPU_SUPPORT is 0 is now handled * Enable access to FPU registers only if FPU is enabled * Make minor formating changes * Format ARM Cortex-R5 port * Address review comments from @ChristosZosi * Minor code review suggestions Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Christos Zosimidis Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Fix freertos_kernel cmake property, Posix Port (#640) * Fix freertos_kernel cmake property, Posix Port * Moves the `set_property()` call below the target definition in top level CMakeLists file * Corrects billion value from `ULL` suffix (not C90 compliant) to `UL` suffix with cast to uint64_t * Add blank line to CMakeLists.txt * Add missing FreeRTOS+ defines * Run kernel demos and unit tests for PR changes (#645) * Run kernel demos and unit tests for PR changes Kernel demos check builds multiple demos from FreeRTOS/FreeRTOS and unit tests check runs unit tests in FreeRTOS/FreeRTOS. Both of these checks currently use main branch of FreeRTOS-Kernel. This commits updates these checks to use the changes in the PR. Signed-off-by: Gaurav Aggarwal * Do not specify PR SHA explicitly as that is default Signed-off-by: Gaurav Aggarwal * Remove explicit PR SHA from kernel checks Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add functions to get the buffers of statically created objects (#641) Added various ...GetStaticBuffer() functions to get the buffers of statically created objects. --------- Co-authored-by: Paul Bartell Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Cortex-M Assert when NVIC implements 8 PRIO bits (#639) * Cortex-M Assert when NVIC implements 8 PRIO bits * Fix CM3 ports * Fix ARM_CM3_MPU * Fix ARM CM3 * Fix ARM_CM4_MPU * Fix ARM_CM4 * Fix GCC ARM_CM7 * Fix IAR ARM ports * Uncrustify changes * Fix MikroC_ARM_CM4F port * Fix MikroC_ARM_CM4F port-(2) * Fix RVDS ARM ports * Revert changes for Tasking/ARM_CM4F port * Revert changes for Tasking/ARM_CM4F port-(2) * Update port.c Fix GCC/ARM_CM4F port * Update port.c * update GCC\ARM_CM4F port * update port.c * Assert to check configMAX_SYSCALL_INTERRUPT_PRIORITY is set to higher priority * Fix merge error: remove duplicate code * Fix typos --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Ubuntu * Remove C90 requirement from CMakeLists (#649) This is needed as it is breaking projects - https://forums.freertos.org/t/freertos-gcc-cmake/16984 We will re-evaluate and accordingly add this later. Signed-off-by: Gaurav Aggarwal * Only add alignment padding when needed (#650) Heap 4 and Heap 5 add some padding to ensure that the allocated blocks are always aligned to portBYTE_ALIGNMENT bytes. The code until now was adding padding always even if the resulting block was already aligned. This commits updates the code to only add padding if the resulting block is not aligned. 
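A small sketch of the conditional padding described above (illustrative only - the helper name and parameters are invented for the example; heap_4.c/heap_5.c perform this calculation inline on their own variables):

```c
#include <stddef.h>

/* Fallback definitions so the sketch is self-contained; the real values come
 * from the kernel's portable layer, where portBYTE_ALIGNMENT_MASK is
 * ( portBYTE_ALIGNMENT - 1 ). */
#ifndef portBYTE_ALIGNMENT
    #define portBYTE_ALIGNMENT         8
    #define portBYTE_ALIGNMENT_MASK    ( 0x0007U )
#endif

static size_t prvAlignedAllocationSize( size_t xWantedSize, size_t xHeaderSize )
{
    size_t xTotalSize = xWantedSize + xHeaderSize;

    /* Only add padding when the total is not already a multiple of
     * portBYTE_ALIGNMENT - previously padding was added unconditionally. */
    if( ( xTotalSize & portBYTE_ALIGNMENT_MASK ) != 0x00U )
    {
        xTotalSize += ( portBYTE_ALIGNMENT - ( xTotalSize & portBYTE_ALIGNMENT_MASK ) );
    }

    return xTotalSize;
}
```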
Signed-off-by: Gaurav Aggarwal * add a missing comma (#651) * fix conversion warning (#658) FreeRTOS-Kernel/portable/GCC/ARM_CM4F/port.c:399:41: error: conversion from 'uint32_t' {aka 'long unsigned int'} to 'uint8_t' {aka 'unsigned char'} may change value [-Werror=conversion] Signed-off-by: Vo Trung Chi --------- Signed-off-by: Gaurav Aggarwal Signed-off-by: Cervenka Dusan Signed-off-by: Devaraj Ranganna Signed-off-by: Keith Packard Signed-off-by: Vo Trung Chi Co-authored-by: Archit Gupta <71798289+archigup@users.noreply.github.com> Co-authored-by: tcpluess Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Chris Copeland Co-authored-by: David J. Fiddes <35607151+davefiddes@users.noreply.github.com> Co-authored-by: Dusan Cervenka Co-authored-by: bbain <16752579+bbain@users.noreply.github.com> Co-authored-by: Ju1He1 <93189163+Ju1He1@users.noreply.github.com> Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> Co-authored-by: phelter Co-authored-by: jacky309 Co-authored-by: Jacques GUILLOU Co-authored-by: Devaraj Ranganna Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Joseph Julicher Co-authored-by: Paul Bartell Co-authored-by: Christos Zosimidis Co-authored-by: Kody Stribrny <89810515+kstribrnAmzn@users.noreply.github.com> Co-authored-by: Holden Co-authored-by: Darian <32921628+Dazza0@users.noreply.github.com> Co-authored-by: Ubuntu Co-authored-by: Nicolas Co-authored-by: Vo Trung Chi --- stream_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream_buffer.c b/stream_buffer.c index 7a20b61e44a..e3c62ab1da2 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -98,7 +98,7 @@ do { \ portBASE_TYPE xSavedInterruptStatus; \ \ - xSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); \ + xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ From 28387df8f5df1938a81010b8b7fed26658e47ca6 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Wed, 19 Apr 2023 04:32:47 +0800 Subject: [PATCH 151/164] Not yield for running task in prvYieldForTask (#72) * Raise priority of a running task should not alter other cores --- tasks.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/tasks.c b/tasks.c index 28193e20f9d..7b1f828276f 100644 --- a/tasks.c +++ b/tasks.c @@ -798,6 +798,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /* No task should yield for this one if it is a lower priority * than priority level of currently ready tasks. */ if( pxTCB->uxPriority >= uxTopReadyPriority ) + #else + + /* Yield is not required for a task which is already running. 
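+             * A task can be in the Running state on at most one core, so if pxTCB is already running no other core needs to yield for it.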
*/ + if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) #endif { xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority; @@ -818,24 +822,29 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) ) { - if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt ) + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) + #endif { - #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) - #endif + if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt ) { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) #endif { - xLowestPriorityToPreempt = xCurrentCoreTaskPriority; - xLowestPriorityCore = xCoreID; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriorityToPreempt = xCurrentCoreTaskPriority; + xLowestPriorityCore = xCoreID; + } } } - } - else - { - mtCOVERAGE_TEST_MARKER(); + else + { + mtCOVERAGE_TEST_MARKER(); + } } #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) From 23ce58ec53e145f296c6f7e0c36fec29b8aec2cd Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 20 Apr 2023 13:30:07 +0800 Subject: [PATCH 152/164] Remove unreachable code in prvSelectHighestPriorityTask (#70) * Remove unreachable code in prvSelectHighestPriorityTask * Remove unreachable assert condition * Update comment --- tasks.c | 135 +++++++++++++++++++++++++++----------------------------- 1 file changed, 65 insertions(+), 70 deletions(-) diff --git a/tasks.c b/tasks.c index 7b1f828276f..48e3b1540d2 100644 --- a/tasks.c +++ b/tasks.c @@ -1024,106 +1024,101 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /* There are configNUMBER_OF_CORES Idle tasks created when scheduler started. * The scheduler should be able to select a task to run when uxCurrentPriority - * is tskIDLE_PRIORITY. */ - configASSERT( ( uxCurrentPriority > tskIDLE_PRIORITY ) || ( xTaskScheduled == pdTRUE ) ); + * is tskIDLE_PRIORITY. uxCurrentPriority is never decreased to value blow + * tskIDLE_PRIORITY. */ uxCurrentPriority--; } - if( xTaskScheduled == pdTRUE ) + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) { - configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) == pdTRUE ); - - #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + if( xPriorityDropped != pdFALSE ) { - if( xPriorityDropped != pdFALSE ) - { - /* There may be several ready tasks that were being prevented from running because there was - * a higher priority task running. Now that the last of the higher priority tasks is no longer - * running, make sure all the other idle tasks yield. */ - BaseType_t x; + /* There may be several ready tasks that were being prevented from running because there was + * a higher priority task running. Now that the last of the higher priority tasks is no longer + * running, make sure all the other idle tasks yield. 
*/ + BaseType_t x; - for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ ) + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ ) + { + if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) { - if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) - { - prvYieldCore( x ); - } + prvYieldCore( x ); } } } - #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + } + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ - #if ( configUSE_CORE_AFFINITY == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + { + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) { - if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) + /* A ready task was just evicted from this core. See if it can be + * scheduled on any other core. */ + UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; + BaseType_t xLowestPriority = pxPreviousTCB->uxPriority; + BaseType_t xLowestPriorityCore = -1; + BaseType_t x; + + if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) { - /* A ready task was just evicted from this core. See if it can be - * scheduled on any other core. */ - UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; - BaseType_t xLowestPriority = pxPreviousTCB->uxPriority; - BaseType_t xLowestPriorityCore = -1; - BaseType_t x; + xLowestPriority = xLowestPriority - 1; + } - if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) - { - xLowestPriority = xLowestPriority - 1; - } + if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) + { + /* The ready task that was removed from this core is not excluded from it. + * Only look at the intersection of the cores the removed task is allowed to run + * on with the cores that the new task is excluded from. It is possible that the + * new task was only placed onto this core because it is excluded from another. + * Check to see if the previous task could run on one of those cores. */ + uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); + } + else + { + /* The ready task that was removed from this core is excluded from it. */ + } - if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) - { - /* The ready task that was removed from this core is not excluded from it. - * Only look at the intersection of the cores the removed task is allowed to run - * on with the cores that the new task is excluded from. It is possible that the - * new task was only placed onto this core because it is excluded from another. - * Check to see if the previous task could run on one of those cores. */ - uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); - } - else - { - /* The ready task that was removed from this core is excluded from it. 
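+                     * Leave uxCoreMap as the task's own affinity mask so that every core it is allowed to run on is still considered below.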
*/ - } + uxCoreMap &= ( ( 1 << configNUMBER_OF_CORES ) - 1 ); - uxCoreMap &= ( ( 1 << configNUMBER_OF_CORES ) - 1 ); + for( x = ( configNUMBER_OF_CORES - 1 ); x >= 0; x-- ) + { + UBaseType_t uxCore = ( UBaseType_t ) x; + BaseType_t xTaskPriority; - for( x = ( configNUMBER_OF_CORES - 1 ); x >= 0; x-- ) + if( ( uxCoreMap & ( 1 << uxCore ) ) != 0 ) { - UBaseType_t uxCore = ( UBaseType_t ) x; - BaseType_t xTaskPriority; + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; - if( ( uxCoreMap & ( 1 << uxCore ) ) != 0 ) + if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) { - xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; - - if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) - { - xTaskPriority = xTaskPriority - ( BaseType_t ) 1; - } + xTaskPriority = xTaskPriority - ( BaseType_t ) 1; + } - uxCoreMap &= ~( 1 << uxCore ); + uxCoreMap &= ~( 1 << uxCore ); - if( ( xTaskPriority < xLowestPriority ) && - ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && - ( xYieldPendings[ uxCore ] == pdFALSE ) ) + if( ( xTaskPriority < xLowestPriority ) && + ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && + ( xYieldPendings[ uxCore ] == pdFALSE ) ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) - #endif - { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = uxCore; - } + xLowestPriority = xTaskPriority; + xLowestPriorityCore = uxCore; } } } + } - if( taskVALID_CORE_ID( xLowestPriorityCore ) == pdTRUE ) - { - prvYieldCore( xLowestPriorityCore ); - } + if( xLowestPriorityCore >= 0 ) + { + prvYieldCore( xLowestPriorityCore ); } } - #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */ } + #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */ } #endif /* ( configNUMBER_OF_CORES > 1 ) */ From 9da68366310e39ac1bcfab7ee4f2e52c9b754193 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 20 Apr 2023 13:55:48 +0800 Subject: [PATCH 153/164] Move static idle task memory to global scope (#75) --- tasks.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tasks.c b/tasks.c index 48e3b1540d2..d082b60b21b 100644 --- a/tasks.c +++ b/tasks.c @@ -465,6 +465,11 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t #endif +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) + static StaticTask_t xIdleTCBBuffers[ configNUMBER_OF_CORES - 1 ]; + static StackType_t xIdleTaskStackBuffers[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; +#endif /* #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */ + /*lint -restore */ /*-----------------------------------------------------------*/ @@ -3171,9 +3176,6 @@ static BaseType_t prvCreateIdleTasks( void ) } else { - static StaticTask_t xIdleTCBBuffers[ configNUMBER_OF_CORES - 1 ]; - static StackType_t xIdleTaskStackBuffers[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; - xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, cIdleName, configMINIMAL_STACK_SIZE, From 2d281c7951acb0443488a89b694434ebf2d55be0 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 21 Apr 2023 12:34:03 +0800 Subject: [PATCH 154/164] Update XMOS AICORE conflict (#77) * 
Define portBASE_TYPE in XMOS AICORE porting * Update enter critical from ISR API --- portable/ThirdParty/xClang/XCOREAI/portmacro.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/portable/ThirdParty/xClang/XCOREAI/portmacro.h b/portable/ThirdParty/xClang/XCOREAI/portmacro.h index aef12cf3749..019d29a3c7a 100644 --- a/portable/ThirdParty/xClang/XCOREAI/portmacro.h +++ b/portable/ThirdParty/xClang/XCOREAI/portmacro.h @@ -26,6 +26,8 @@ typedef double portDOUBLE; typedef int32_t BaseType_t; typedef uint32_t UBaseType_t; +#define portBASE_TYPE BaseType_t + #if( configUSE_16_BIT_TICKS == 1 ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff @@ -163,8 +165,8 @@ void vTaskExitCritical(void); #define portENTER_CRITICAL() vTaskEnterCritical() #define portEXIT_CRITICAL() vTaskExitCritical() -extern UBaseType_t vTaskEnterCriticalFromISR( void ); -extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); +extern portBASE_TYPE vTaskEnterCriticalFromISR( void ); +extern void vTaskExitCriticalFromISR( portBASE_TYPE xSavedInterruptStatus ); #define portENTER_CRITICAL_FROM_ISR vTaskEnterCriticalFromISR #define portEXIT_CRITICAL_FROM_ISR vTaskExitCriticalFromISR From e35fd38579a5087a28e29588976ed5d74e1f75c0 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 21 Apr 2023 16:20:09 +0800 Subject: [PATCH 155/164] Fix run time stats for SMP (#76) * Update get idle tasks stats * Fix get task stats * Fix missing configNUM_CORES --- tasks.c | 67 ++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 25 deletions(-) diff --git a/tasks.c b/tasks.c index d082b60b21b..d54975086d7 100644 --- a/tasks.c +++ b/tasks.c @@ -460,8 +460,8 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ - PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUM_CORES ] = { 0UL }; /**< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUM_CORES ] = { 0UL }; /**< Holds the total amount of execution time as defined by the run time counter clock. */ + PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0UL }; /**< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0UL }; /**< Holds the total amount of execution time as defined by the run time counter clock. 
*/ #endif @@ -7388,17 +7388,10 @@ TickType_t uxTaskResetEventItemValue( void ) configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) { - configRUN_TIME_COUNTER_TYPE ulReturn = 0; - - for( BaseType_t i = 0; i < configNUMBER_OF_CORES; i++ ) - { - ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter; - } - - return ulReturn; + return xTask->ulRunTimeCounter; } -#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +#endif /*-----------------------------------------------------------*/ #if ( configGENERATE_RUN_TIME_STATS == 1 ) @@ -7406,9 +7399,8 @@ TickType_t uxTaskResetEventItemValue( void ) configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask ) { configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; - configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0; - ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) ( portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES ); + ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); /* For percentage calculations. */ ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; @@ -7416,12 +7408,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* Avoid divide by zero errors. */ if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 ) { - for( BaseType_t i = 0; i < configNUMBER_OF_CORES; i++ ) - { - ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter; - } - - ulReturn = ulRunTimeCounter / ulTotalTime; + ulReturn = xTask->ulRunTimeCounter / ulTotalTime; } else { @@ -7434,24 +7421,54 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */ /*-----------------------------------------------------------*/ -#if ( configGENERATE_RUN_TIME_STATS == 1 ) +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void ) { - return ulTaskGetRunTimeCounter( xIdleTaskHandle ); + configRUN_TIME_COUNTER_TYPE ulReturn = 0; + + for( BaseType_t i = 0; i < configNUMBER_OF_CORES; i++ ) + { + ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter; + } + + return ulReturn; } -#endif +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( configGENERATE_RUN_TIME_STATS == 1 ) +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ) { - return ulTaskGetRunTimePercent( xIdleTaskHandle ); + configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; + configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0; + + ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES; + + /* For percentage calculations. */ + ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; + + /* Avoid divide by zero errors. 
*/ + if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 ) + { + for( BaseType_t i = 0; i < configNUMBER_OF_CORES; i++ ) + { + ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter; + } + + ulReturn = ulRunTimeCounter / ulTotalTime; + } + else + { + ulReturn = 0; + } + + return ulReturn; } -#endif +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ /*-----------------------------------------------------------*/ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, From 096a58240979950ba9965c307d07a85c959b3405 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Mon, 24 Apr 2023 14:14:40 +0800 Subject: [PATCH 156/164] Update the uxSchedulerSuspended after prvCheckForRunStateChange (#62) * Update the uxSchedulerSuspended after the prvCheckForRunStateChange to prevent race condition in fromISR APIs --- tasks.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/tasks.c b/tasks.c index d54975086d7..400da8f73c1 100644 --- a/tasks.c +++ b/tasks.c @@ -686,7 +686,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; static void prvCheckForRunStateChange( void ) { UBaseType_t uxPrevCriticalNesting; - UBaseType_t uxPrevSchedulerSuspended; TCB_t * pxThisTCB; /* This should be skipped if called from an ISR. If the task on the current @@ -710,24 +709,19 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; * and reacquire the correct locks. And then, do it all over again * if our state changed again during the reacquisition. */ uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT(); - uxPrevSchedulerSuspended = uxSchedulerSuspended; - - /* This must only be called the first time we enter into a critical - * section, otherwise it could context switch in the middle of a - * critical section. */ - configASSERT( ( uxPrevCriticalNesting + uxPrevSchedulerSuspended ) == 1U ); if( uxPrevCriticalNesting > 0U ) { portSET_CRITICAL_NESTING_COUNT( 0U ); + portRELEASE_ISR_LOCK(); } else { - portGET_ISR_LOCK(); - uxSchedulerSuspended = 0U; + /* The scheduler is suspended. uxSchedulerSuspended is updated + * only when the task is not requested to yield. */ + mtCOVERAGE_TEST_MARKER(); } - portRELEASE_ISR_LOCK(); portRELEASE_TASK_LOCK(); portMEMORY_BARRIER(); @@ -745,11 +739,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; portGET_ISR_LOCK(); portSET_CRITICAL_NESTING_COUNT( uxPrevCriticalNesting ); - uxSchedulerSuspended = uxPrevSchedulerSuspended; if( uxPrevCriticalNesting == 0U ) { - /* uxPrevSchedulerSuspended must be 1. */ portRELEASE_ISR_LOCK(); } } @@ -3367,14 +3359,11 @@ void vTaskSuspendAll( void ) portSOFTWARE_BARRIER(); portGET_TASK_LOCK(); - portGET_ISR_LOCK(); - - /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment - * is used to allow calls to vTaskSuspendAll() to nest. */ - ++uxSchedulerSuspended; - portRELEASE_ISR_LOCK(); - if( uxSchedulerSuspended == 1U ) + /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The + * purpose is to prevent altering the variable when fromISR APIs are readying + * it. */ + if( uxSchedulerSuspended == 0U ) { if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { @@ -3390,6 +3379,13 @@ void vTaskSuspendAll( void ) mtCOVERAGE_TEST_MARKER(); } + portGET_ISR_LOCK(); + + /* The scheduler is suspended if uxSchedulerSuspended is non-zero. 
An increment + * is used to allow calls to vTaskSuspendAll() to nest. */ + ++uxSchedulerSuspended; + portRELEASE_ISR_LOCK(); + portCLEAR_INTERRUPT_MASK( ulState ); } else From 37ebbd8b94e0b4cb1a57ebca42b305b939d9536b Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 25 Apr 2023 13:54:42 +0800 Subject: [PATCH 157/164] Fix SMP dev branch CI errors (#79) * Fix uncrustify * Update lexicon * Remove tailing space * Ignore XMOS AICORE header check --- .github/lexicon.txt | 1 + .github/scripts/kernel_checker.py | 3 ++- portable/ThirdParty/xClang/XCOREAI/portasm.S | 2 +- tasks.c | 9 ++++----- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 305498fdc59..5e7d6dbbf75 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1521,6 +1521,7 @@ prstc prttc prv prvaddcurrenttasktodelayedlist +prvcheckforrunstatechange prvcheckinterfaces prvchecktaskswaitingtermination prvcopydatatoqueue diff --git a/.github/scripts/kernel_checker.py b/.github/scripts/kernel_checker.py index f24bbf25722..a0706932fe0 100755 --- a/.github/scripts/kernel_checker.py +++ b/.github/scripts/kernel_checker.py @@ -87,7 +87,8 @@ r'.*\.git.*', r'.*portable/IAR/AtmelSAM7S64/.*AT91SAM7.*', r'.*portable/GCC/ARM7_AT91SAM7S/.*', - r'.*portable/MPLAB/PIC18F/stdio.h' + r'.*portable/MPLAB/PIC18F/stdio.h', + r'.*portable/ThirdParty/xClang/XCOREAI/*' ] KERNEL_THIRD_PARTY_PATTERNS = [ diff --git a/portable/ThirdParty/xClang/XCOREAI/portasm.S b/portable/ThirdParty/xClang/XCOREAI/portasm.S index 702e9a2f021..7445672a08e 100644 --- a/portable/ThirdParty/xClang/XCOREAI/portasm.S +++ b/portable/ThirdParty/xClang/XCOREAI/portasm.S @@ -87,7 +87,7 @@ rtos_interrupt_callback_common: bla r1} /* and call the callback function. */ {set sp, r4 /* Restore the task's SP now. */ - + get r11, id} /* Get the logical core ID into r11. */ ldaw r0, dp[rtos_core_map] ldw r0, r0[r11] /* Translate to the RTOS core ID into r0. */ diff --git a/tasks.c b/tasks.c index 400da8f73c1..9ed0ddfd331 100644 --- a/tasks.c +++ b/tasks.c @@ -294,7 +294,7 @@ typedef BaseType_t TaskRunning_t; /* Code below here allows infinite loop controlling, especially for the infinite loop * in idle task function (for example when performing unit tests). */ #ifndef INFINITE_LOOP - #define INFINITE_LOOP() 1 + #define INFINITE_LOOP() 1 #endif /* @@ -460,8 +460,8 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ - PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0UL }; /**< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0UL }; /**< Holds the total amount of execution time as defined by the run time counter clock. */ +PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0UL }; /**< Holds the value of a timer/counter the last time a task was switched in. */ +PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0UL }; /**< Holds the total amount of execution time as defined by the run time counter clock. 
*/ #endif @@ -796,7 +796,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; * than priority level of currently ready tasks. */ if( pxTCB->uxPriority >= uxTopReadyPriority ) #else - /* Yield is not required for a task which is already running. */ if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) #endif @@ -3738,7 +3737,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char return pxReturn; } - #else + #else /* if ( configNUMBER_OF_CORES == 1 ) */ static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, const char pcNameToQuery[] ) { From 87c9c239b712a43563e0e56bd7d6a69350ccfc6b Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 25 Apr 2023 13:55:38 +0800 Subject: [PATCH 158/164] Fix ulTotalRunTime and ulTaskSwitchedInTime (#80) * SMP has multiple ulTotalRunTime and ulTaskSwitchedInTime --- tasks.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tasks.c b/tasks.c index 9ed0ddfd331..8121b8cb493 100644 --- a/tasks.c +++ b/tasks.c @@ -4539,9 +4539,9 @@ BaseType_t xTaskIncrementTick( void ) #if ( configGENERATE_RUN_TIME_STATS == 1 ) { #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ 0 ] ); #else - ulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalRunTime[ 0 ] = portGET_RUN_TIME_COUNTER_VALUE(); #endif /* Add the amount of time the task has been running to the @@ -4551,16 +4551,16 @@ BaseType_t xTaskIncrementTick( void ) * overflows. The guard against negative values is to protect * against suspect run time stat counter implementations - which * are provided by the application, not the kernel. */ - if( ulTotalRunTime > ulTaskSwitchedInTime ) + if( ulTotalRunTime[ 0 ] > ulTaskSwitchedInTime[ 0 ] ) { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ 0 ] - ulTaskSwitchedInTime[ 0 ] ); } else { mtCOVERAGE_TEST_MARKER(); } - ulTaskSwitchedInTime = ulTotalRunTime; + ulTaskSwitchedInTime[ 0 ] = ulTotalRunTime[ 0 ]; } #endif /* configGENERATE_RUN_TIME_STATS */ From b40b9e3f059b636969f790ce447e8e7c9b355672 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 25 Apr 2023 13:57:01 +0800 Subject: [PATCH 159/164] Smp dev compelete merge main 20230424 (#78) * Fix array-bounds compiler warning on gcc11+ in list.h (#580) listGET_OWNER_OF_NEXT_ENTRY computes `( pxConstList )->pxIndex->pxNext` after verifying that `( pxConstList )->pxIndex` points to `xListEnd`, which due to being a MiniListItem_t, can be shorter than a ListItem_t. Thus, `( pxConstList )->pxIndex` is a `ListItem_t *` that extends past the end of the `List_t` whose `xListEnd` it points to. This is fixed by accessing `pxNext` through a `MiniListItem_t` instead. * move the prototype for vApplicationIdleHook to task.h. 
(#600) Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update equal priority task preemption (#603) * vTaskResume and vTaskPrioritySet don't preempt equal priority task * Update vTaskResumeAll not to preempt task with equal priority * Fix in xTaskResumeFromISR * Update FreeRTOS/FreeRTOS build checks (#613) This is needed to be compatible with the refactoring done in this PR - https://github.com/FreeRTOS/FreeRTOS/pull/889 Signed-off-by: Gaurav Aggarwal Signed-off-by: Gaurav Aggarwal * Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent (#611) Allow ulTaskGetIdleRunTimeCounter and ulTaskGetIdleRunTimePercent to be used whenever configGENERATE_RUN_TIME_STATS is enabled, as this is the only requirement for these functions to work. * Fix some CMake documentation typos (#616) The quick start instructions for CMake mention the "master" git branch which has been replaced by "main" in the current repo. The main CMakeLists.txt documents how to integrate a custom port. Fix a typo in the suggested CMake code. * Added support of 64bit events. (#597) * Added support of 64bit even Signed-off-by: Cervenka Dusan * Added missing brackets Signed-off-by: Cervenka Dusan * Made proper name for tick macro. Signed-off-by: Cervenka Dusan * Improved macro evaluation Signed-off-by: Cervenka Dusan * Fixed missed port files + documentation Signed-off-by: Cervenka Dusan * Changes made on PR Signed-off-by: Cervenka Dusan * Fix macro definition. Signed-off-by: Cervenka Dusan * Formatted code with uncrustify Signed-off-by: Cervenka Dusan --------- Signed-off-by: Cervenka Dusan * Introduce portMEMORY_BARRIER for Microblaze port. (#621) The introduction of `portMEMORY_BARRIER` will ensure the places in the kernel use a barrier will work. For example, `xTaskResumeAll` has a memory barrier to ensure its correctness when compiled with optimization enabled. Without the barrier `xTaskResumeAll` can fail (e.g. start reading and writing to address 0 and/or infinite looping) when `xPendingReadyList` contains more than one task to restore. In `xTaskResumeAll` the compiler chooses to cache the `pxTCB` the first time through the loop for use in every subsequent loop. This is incorrect as the removal of `pxTCB->xEventListItem` will actually change the value of `pxTCB` if it was read again at the top of the loop. The barrier forces the compiler to read `pxTCB` again at the top of the loop. The compiler is operating correctly. The removal `pxTCB->xEventListItem` executes on a `List_t *` and `ListItem_t *`. This means that the compiler can assume that any `MiniListItem_t` values are unchanged by the loop (i.e. "strict-aliasing"). This allows the compiler to cache `pxTCB` as it is obtained via a `MiniListItem_t`. This is incorrect in this case because it is possible for a `ListItem_t *` to actually alias a `MiniListItem_t`. This is technically a "violation of aliasing rules" so we use the the barrier to disable the strict-aliasing optimization in this loop. 
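A compiler barrier of this kind generates no instructions; it only tells the compiler that memory may have changed, so values it has cached in registers must be reloaded. A minimal sketch of the mechanism, assuming a GCC-compatible compiler and using an illustrative polling flag rather than the kernel's list structures, is:

```
/* Same form as the port-layer definition: an empty asm with a "memory"
 * clobber, so the compiler must assume memory was modified across it. */
#define portMEMORY_BARRIER()    __asm volatile ( "" ::: "memory" )

/* Illustrative flag, e.g. written from an interrupt handler. */
static int xDataReady = 0;

void vWaitForData( void )
{
    /* Without the barrier (or a volatile qualifier) the compiler could
     * legally hoist the read of xDataReady out of the loop and spin on a
     * stale register copy; the barrier forces a fresh read on each pass. */
    while( xDataReady == 0 )
    {
        portMEMORY_BARRIER();
    }
}
```

The same forced-reload effect is what makes `xTaskResumeAll` read `pxTCB` again at the top of each loop iteration instead of reusing the value cached on the first pass.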
* Do not call exit() on MSVC Port when calling vPortEndScheduler (#624) * make port exitable * correctly set xPortRunning to False * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update PR template to include checkbox for Unit Test related changes (#627) * Fix build failure introduced in PR #597 (#629) The PR #597 introduced a new config option configTICK_TYPE_WIDTH_IN_BITS which can be defined to one of the following: * TICK_TYPE_WIDTH_16_BITS - Tick type is 16 bit wide. * TICK_TYPE_WIDTH_32_BITS - Tick type is 32 bit wide. * TICK_TYPE_WIDTH_64_BITS - Tick type is 64 bit wide. Earlier we supported 16 and 32 bit width for tick type which was controlled using the config option configUSE_16_BIT_TICKS. The PR tried to maintain backward compatibility by honoring configUSE_16_BIT_TICKS. The backward compatibility did not work as expected though, as the macro configTICK_TYPE_WIDTH_IN_BITS was used before it was defined. This PR addresses it by ensuring that the macro configTICK_TYPE_WIDTH_IN_BITS is defined before it is used. Testing 1. configUSE_16_BIT_TICKS is defined to 0. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 2. configUSE_16_BIT_TICKS is defined to 1. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 3. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_16_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 4. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_32_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 5. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_64_BITS. ``` #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. ``` The testing was done for GCC/ARM_CM3 port which does not support 64 bit tick type. 6. Neither configUSE_16_BIT_TICKS nor configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. 
``` 7. Both configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` Related issue - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/628 Signed-off-by: Gaurav Aggarwal * Feature/fixing clang gnu compiler warnings (#620) * Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558) * Using single name definition for libraries everywhere. (#558) * Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571) * Removing compiler warnings for GNU and Clang. (#571) * Added in documentation on how to consume from a main project. Added default PORT selection for native POSIX and MINGW platforms. * Only adding freertos_config if it exists. Removing auto generation of it from a FREERTOS_CONFIG_FILE_DIRECTORY. * Fixing clang and gnu compiler warnings. * Adding in project information and how to compile for GNU/clang * Fixing compiler issue with unused variable - no need to declare variable. * Adding in compile warnings for linux builds that kernel is okay with using. * Fixing more extra-semi-stmt clang warnings. * Moving definition of hooks into header files if features are enabled. * Fixing formatting with uncrustify. * Fixing merge conflicts with main merge. * Fixing compiler errors due to merge issues and formatting. * Fixing Line feeds. * Adding 'portNORETURN' into portmacros.h. Other Updates based on PR request * Further clean-up of clang and clang-tidy issues. * Removing compiler specific pragmas from common c files. * Fixing missing lexicon entry and uncrustify formatting changes. * Resolving merge issue multiple defnitions of proto for prvIdleTask * Fixing formatting issues that are not covered by uncrustify. Use clang-tidy instead if you want this level of control. * More uncrustify formatting issues. * Fixing extra bracket in #if statement. --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * POSIX port fixes (#626) * Fix types in POSIX port Use TaskFunction_t and StackType_t as other ports do. * Fix portTICK_RATE_MICROSECONDS in POSIX port --------- Co-authored-by: Jacques GUILLOU Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Cortex-M35P: Add Cortex-M35P port (#631) * Cortex-M35P: Add Cortex-M35P port The Cortex-M35P support added to kernel. The port hasn't been validated yet with TF-M. Hence TF-M support is not included in this port. Signed-off-by: Devaraj Ranganna * Add portNORETURN to the newly added portmacro.h Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Introduced Github Status Badge for Unit Tests (#634) * Introduced Github Status Badge for Unit Tests * Github status badge to point to latest run * Github status badge UT points to latest results * Fixed URL for Github Status badge --------- Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Remove C99 requirement from CMake file (#633) * Remove C99 requirement from CMake file The kernel source is C89 compliant and does not need C99. 
Signed-off-by: Gaurav Aggarwal * Explicitly set C89 requirement for kernel Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add Thread Local Storage (TLS) support using Picolibc functions (#343) * Pass top of stack to configINIT_TLS_BLOCK Picolibc wants to allocate the per-task TLS block within the stack segment, so it will need to modify the top of stack value. Pass the pxTopOfStack variable to make this explicit. Signed-off-by: Keith Packard * Move newlib-specific definitions to separate file This reduces the clutter in FreeRTOS.h caused by having newlib-specific macros present there. Signed-off-by: Keith Packard * Make TLS code depend only on configUSE_C_RUNTIME_TLS_SUPPORT Remove reference to configUSE_NEWLIB_REENTRANT as that only works when using newlib. configUSE_C_RUNTIME_TLS_SUPPORT is always set when configUSE_NEWLIB_REENTRANT is set, so using both was redundant in that case. Signed-off-by: Keith Packard * portable-ARC: Adapt ARC support to use generalized TLS support With generalized thread local storage (TLS) support present in the core, the two ARC ports need to have the changes to the TCB mirrored to them. Signed-off-by: Keith Packard * Add Thread Local Storage (TLS) support using Picolibc functions This patch provides definitions of the general TLS support macros in terms of the Picolibc TLS support functions. Picolibc is normally configured to use TLS internally for all variables that are intended to be task-local, so these changes are necessary for picolibc to work correctly with FreeRTOS. The picolibc helper functions rely on elements within the linker script to arrange the TLS data in memory and define some symbols. Applications wanting to use this mechanism will need changes in their linker script when migrating to picolibc. Signed-off-by: Keith Packard --------- Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Interrupt priority assert improvements for CM3/4/7 (#602) * Interrupt priority assert improvements for CM3/4/7 In the ARM_CM3, ARM_CM4, and ARM_CM7 ports, change the assertion that `configMAX_SYSCALL_INTERRUPT_PRIORITY` is nonzero to account for the number of priority bits implemented by the hardware. Change these ports to also use the lowest priority for PendSV and SysTick, ignoring `configKERNEL_INTERRUPT_PRIORITY`. * Remove not needed configKERNEL_INTERRUPT_PRIORITY define Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Introduced code coverage status badge (#635) * Introduced code coverage status badge * Trying to fix the URL checker issue * Fix URL check Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * added portPOINTER_SIZE_TYPE and SIZE_MAX definition to PIC24/dsPIC port (#636) * added portPOINTER_SIZE_TYPE definition to PIC24/dsPIC port * Added SIZE_MAX definition to PIC24/dsPIC33 * Fix TLS and stack alignment when using picolibc (#637) Both the TLS block and stack must be correctly aligned when using picolibc. The architecture stack alignment is represented by the portBYTE_ALIGNMENT_MASK and the TLS block alignment is provided by the Picolibc _tls_align() inline function for Picolibc version 1.8 and above. 
For older versions of Picolibc, we'll assume that the TLS block requires the same alignment as the stack. For downward growing stacks, this requires aligning the start of the TLS block to the maximum of the stack alignment and the TLS alignment. With this, both the TLS block and stack will now be correctly aligned. For upward growing stacks, the two areas must be aligned independently; the TLS block is aligned from the start of the stack, then the tls space is allocated, and then the stack is aligned above that. It's probably useful to know here that the linker ensures that variables within the TLS block are assigned offsets that match their alignment requirements. If the TLS block itself is correctly aligned, then everything within will also be. I have only tested the downward growing stack branch of this patch. Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Enable building the GCC Cortex-R5 port without an FPU (#586) * Ensure configUSE_TASK_FPU_SUPPORT option is set correctly If one does enable the FPU of the Cortex-R5 processor, then the GCC compiler will define the macro __ARM_FP. This can be used to ensure, that the configUSE_TASK_FPU_SUPPORT is set accordingly. * Enable the implementation of vPortTaskUsesFPU only if configUSE_TASK_FPU_SUPPORT is set to 1 * Remove error case in pxPortInitialiseStack The case of configUSE_TASK_FPU_SUPPORT is 0 is now handled * Enable access to FPU registers only if FPU is enabled * Make minor formating changes * Format ARM Cortex-R5 port * Address review comments from @ChristosZosi * Minor code review suggestions Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Christos Zosimidis Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Fix freertos_kernel cmake property, Posix Port (#640) * Fix freertos_kernel cmake property, Posix Port * Moves the `set_property()` call below the target definition in top level CMakeLists file * Corrects billion value from `ULL` suffix (not C90 compliant) to `UL` suffix with cast to uint64_t * Add blank line to CMakeLists.txt * Add missing FreeRTOS+ defines * Run kernel demos and unit tests for PR changes (#645) * Run kernel demos and unit tests for PR changes Kernel demos check builds multiple demos from FreeRTOS/FreeRTOS and unit tests check runs unit tests in FreeRTOS/FreeRTOS. Both of these checks currently use main branch of FreeRTOS-Kernel. This commits updates these checks to use the changes in the PR. Signed-off-by: Gaurav Aggarwal * Do not specify PR SHA explicitly as that is default Signed-off-by: Gaurav Aggarwal * Remove explicit PR SHA from kernel checks Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add functions to get the buffers of statically created objects (#641) Added various ...GetStaticBuffer() functions to get the buffers of statically created objects. 
--------- Co-authored-by: Paul Bartell Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Cortex-M Assert when NVIC implements 8 PRIO bits (#639) * Cortex-M Assert when NVIC implements 8 PRIO bits * Fix CM3 ports * Fix ARM_CM3_MPU * Fix ARM CM3 * Fix ARM_CM4_MPU * Fix ARM_CM4 * Fix GCC ARM_CM7 * Fix IAR ARM ports * Uncrustify changes * Fix MikroC_ARM_CM4F port * Fix MikroC_ARM_CM4F port-(2) * Fix RVDS ARM ports * Revert changes for Tasking/ARM_CM4F port * Revert changes for Tasking/ARM_CM4F port-(2) * Update port.c Fix GCC/ARM_CM4F port * Update port.c * update GCC\ARM_CM4F port * update port.c * Assert to check configMAX_SYSCALL_INTERRUPT_PRIORITY is set to higher priority * Fix merge error: remove duplicate code * Fix typos --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Ubuntu * Remove C90 requirement from CMakeLists (#649) This is needed as it is breaking projects - https://forums.freertos.org/t/freertos-gcc-cmake/16984 We will re-evaluate and accordingly add this later. Signed-off-by: Gaurav Aggarwal * Only add alignment padding when needed (#650) Heap 4 and Heap 5 add some padding to ensure that the allocated blocks are always aligned to portBYTE_ALIGNMENT bytes. The code until now was adding padding always even if the resulting block was already aligned. This commits updates the code to only add padding if the resulting block is not aligned. Signed-off-by: Gaurav Aggarwal * add a missing comma (#651) * fix conversion warning (#658) FreeRTOS-Kernel/portable/GCC/ARM_CM4F/port.c:399:41: error: conversion from 'uint32_t' {aka 'long unsigned int'} to 'uint8_t' {aka 'unsigned char'} may change value [-Werror=conversion] Signed-off-by: Vo Trung Chi * ARMv7M: Adjust implemented priority bit assertions (#665) Adjust assertions related to the CMSIS __NVIC_PRIO_BITS and FreeRTOS configPRIO_BITS configuration macros such that these macros specify the minimum number of implemented priority bits supported by a config build rather than the exact number of implemented priority bits. Related to Qemu issue #1122 * Format portmacro.h in arm CM0 ports * portable/ARM_CM0: Add xPortIsInsideInterrupt Add missing xPortIsInsideInterrupt function to Cortex-M0 port. --------- Signed-off-by: Gaurav Aggarwal Signed-off-by: Cervenka Dusan Signed-off-by: Devaraj Ranganna Signed-off-by: Keith Packard Signed-off-by: Vo Trung Chi Co-authored-by: Archit Gupta <71798289+archigup@users.noreply.github.com> Co-authored-by: tcpluess Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Chris Copeland Co-authored-by: David J. 
Fiddes <35607151+davefiddes@users.noreply.github.com> Co-authored-by: Dusan Cervenka Co-authored-by: bbain <16752579+bbain@users.noreply.github.com> Co-authored-by: Ju1He1 <93189163+Ju1He1@users.noreply.github.com> Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> Co-authored-by: phelter Co-authored-by: jacky309 Co-authored-by: Jacques GUILLOU Co-authored-by: Devaraj Ranganna Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Joseph Julicher Co-authored-by: Paul Bartell Co-authored-by: Christos Zosimidis Co-authored-by: Kody Stribrny <89810515+kstribrnAmzn@users.noreply.github.com> Co-authored-by: Holden Co-authored-by: Darian <32921628+Dazza0@users.noreply.github.com> Co-authored-by: Ubuntu Co-authored-by: Nicolas Co-authored-by: Vo Trung Chi --- portable/CCS/ARM_CM3/port.c | 20 ++-- portable/CCS/ARM_CM4F/port.c | 20 ++-- portable/GCC/ARM_CM0/portmacro.h | 156 +++++++++++++++++++----------- portable/GCC/ARM_CM3/port.c | 20 ++-- portable/GCC/ARM_CM3_MPU/port.c | 28 +++--- portable/GCC/ARM_CM4F/port.c | 20 ++-- portable/GCC/ARM_CM4_MPU/port.c | 28 +++--- portable/GCC/ARM_CM7/r0p1/port.c | 20 ++-- portable/IAR/ARM_CM0/portmacro.h | 149 +++++++++++++++++----------- portable/IAR/ARM_CM3/port.c | 20 ++-- portable/IAR/ARM_CM4F/port.c | 20 ++-- portable/IAR/ARM_CM4F_MPU/port.c | 20 ++-- portable/IAR/ARM_CM7/r0p1/port.c | 20 ++-- portable/MikroC/ARM_CM4F/port.c | 20 ++-- portable/RVDS/ARM_CM0/portmacro.h | 135 ++++++++++++++++---------- portable/RVDS/ARM_CM3/port.c | 20 ++-- portable/RVDS/ARM_CM4F/port.c | 20 ++-- portable/RVDS/ARM_CM4_MPU/port.c | 20 ++-- portable/RVDS/ARM_CM7/r0p1/port.c | 20 ++-- 19 files changed, 475 insertions(+), 301 deletions(-) diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c index ef5fa5b9340..f3c4e5add03 100755 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -287,19 +287,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. 
+ */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c index c43cf0ef314..c675afe67b4 100755 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -306,19 +306,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index 408162d6402..b9e9ef6623f 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -28,11 +28,13 @@ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -45,84 +47,120 @@ */ /* Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /* Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ - extern void vPortYield( void ); - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portYIELD() vPortYield() - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +extern void vPortYield( void ); +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portYIELD() vPortYield() +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /* Critical section management. */ - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); - extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); - - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); +extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); + +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /* Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portNOP() +#define portNOP() - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif + +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} + +/*-----------------------------------------------------------*/ + + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c index 4aa1f2425d7..9b42eac5a4e 100755 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -330,19 +330,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index e33df014c0e..619f2b0c8be 100755 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -452,21 +452,25 @@ BaseType_t xPortStartScheduler( void ) } #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } + { + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); + } #endif #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } + { + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); + } #endif /* Shift the priority group value back to its position within the AIRCR diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c index fd9e6dbb8f2..88fc76db894 100755 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -373,19 +373,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index 1733fd82072..ab76ee84204 100755 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -495,21 +495,25 @@ BaseType_t xPortStartScheduler( void ) } #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } + { + /* + * Check that the number of implemented priority bits queried + * from hardware is at least as many as specified in the + * CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); + } #endif #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } + { + /* + * Check that the number of implemented priority bits queried + * from hardware is at least as many as specified in the + * FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); + } #endif /* Shift the priority group value back to its position within the AIRCR diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index 316dba13b8e..2be4f27704d 100755 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -361,19 +361,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM0/portmacro.h b/portable/IAR/ARM_CM0/portmacro.h index ce1aa08fc9d..5dcc949b228 100644 --- a/portable/IAR/ARM_CM0/portmacro.h +++ b/portable/IAR/ARM_CM0/portmacro.h @@ -26,12 +26,15 @@ * */ + #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -44,87 +47,119 @@ */ /* Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /* Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 /*-----------------------------------------------------------*/ /* Scheduler utilities. 
*/ - extern void vPortYield( void ); - #define portNVIC_INT_CTRL ( ( volatile uint32_t * ) 0xe000ed04 ) - #define portNVIC_PENDSVSET 0x10000000 - #define portYIELD() vPortYield() - #define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) *( portNVIC_INT_CTRL ) = portNVIC_PENDSVSET - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +extern void vPortYield( void ); +#define portNVIC_INT_CTRL ( ( volatile uint32_t * ) 0xe000ed04 ) +#define portNVIC_PENDSVSET 0x10000000 +#define portYIELD() vPortYield() +#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) *( portNVIC_INT_CTRL ) = portNVIC_PENDSVSET +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /* Critical section management. */ - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - extern uint32_t ulSetInterruptMaskFromISR( void ); - extern void vClearInterruptMaskFromISR( uint32_t ulMask ); +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +extern uint32_t ulSetInterruptMaskFromISR( void ); +extern void vClearInterruptMaskFromISR( uint32_t ulMask ); - #define portDISABLE_INTERRUPTS() __asm volatile ( "cpsid i" ) - #define portENABLE_INTERRUPTS() __asm volatile ( "cpsie i" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) +#define portDISABLE_INTERRUPTS() __asm volatile ( "cpsid i" ) +#define portENABLE_INTERRUPTS() __asm volatile ( "cpsie i" ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) /*-----------------------------------------------------------*/ /* Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) + +#define portNOP() + +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif + +/*-----------------------------------------------------------*/ - #define portNOP() +portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. 
*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} + +/*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. */ - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c index f1c78e46240..d54c3aceb4a 100755 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -279,19 +279,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c index 05d5be0aa65..e0deaf12840 100755 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -317,19 +317,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 69b7bc5d9bc..1b7cca65c86 100755 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -431,19 +431,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c index 9217653a7d4..63f83993db9 100755 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -305,19 +305,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c index 1936de19447..8ef593f5523 100755 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -367,19 +367,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM0/portmacro.h b/portable/RVDS/ARM_CM0/portmacro.h index 54165e74cc4..4a1ea8a7b3a 100644 --- a/portable/RVDS/ARM_CM0/portmacro.h +++ b/portable/RVDS/ARM_CM0/portmacro.h @@ -47,76 +47,113 @@ */ /* Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /* Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 /*-----------------------------------------------------------*/ /* Scheduler utilities. */ - extern void vPortYield( void ); - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portYIELD() vPortYield() - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +extern void vPortYield( void ); +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portYIELD() vPortYield() +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /* Critical section management. 
*/ - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - extern uint32_t ulSetInterruptMaskFromISR( void ); - extern void vClearInterruptMaskFromISR( uint32_t ulMask ); - - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) - #define portDISABLE_INTERRUPTS() __disable_irq() - #define portENABLE_INTERRUPTS() __enable_irq() - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +extern uint32_t ulSetInterruptMaskFromISR( void ); +extern void vClearInterruptMaskFromISR( uint32_t ulMask ); + +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) +#define portDISABLE_INTERRUPTS() __disable_irq() +#define portENABLE_INTERRUPTS() __enable_irq() +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /* Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) + +#define portNOP() + +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE __forceinline +#endif + +/*-----------------------------------------------------------*/ + +static portFORCE_INLINE BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; - #define portNOP() + /* Obtain the number of the currently executing interrupt. */ + __asm + { +/* *INDENT-OFF* */ + mrs ulCurrentInterrupt, ipsr +/* *INDENT-ON* */ + } + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} + +/*-----------------------------------------------------------*/ /* *INDENT-OFF* */ #ifdef __cplusplus diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c index 2ffdd9cc8bd..ae7ce37f37b 100755 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -332,19 +332,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c index bf2fb86f76a..cb003aa38f9 100755 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -398,19 +398,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index e0bd8c86d0f..13e2f8a8ed1 100755 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -491,19 +491,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. 
+ */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c index 2e81e324df9..1df54ab2802 100755 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -382,19 +382,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif From c4a8d7aa828d6977a9a4db4fd433e389008efea6 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Tue, 16 May 2023 18:32:40 +0800 Subject: [PATCH 160/164] Update coverity violation for SMP (#81) * Update coverity violation for SMP ( code surrounded by configNUMBER_OF_CORES > 1 ). * Single core and common code are still scanned by lint tool. --- .github/lexicon.txt | 1 + MISRA.md | 72 +++++++++ include/task.h | 47 +++++- tasks.c | 352 ++++++++++++++++++++++++-------------------- 4 files changed, 305 insertions(+), 167 deletions(-) create mode 100644 MISRA.md diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 5e7d6dbbf75..78c911c5d91 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -317,6 +317,7 @@ coproc coprocessor coprocessors coreid +coverity covfs cp cpacr diff --git a/MISRA.md b/MISRA.md new file mode 100644 index 00000000000..e7ebf77ea6a --- /dev/null +++ b/MISRA.md @@ -0,0 +1,72 @@ +# MISRA Compliance + +FreeRTOS-Kernel conforms to [MISRA C:2012](https://www.misra.org.uk/misra-c) +guidelines, with the deviations listed below. Compliance is checked with +Coverity static analysis. Since the FreeRTOS kernel is designed for +small-embedded devices, it needs to have a very small memory footprint and +has to be efficient. To achieve that and to increase the performance, it +deviates from some MISRA rules. The specific deviations, suppressed inline, +are listed below. + +Additionally, [MISRA configuration](#misra-configuration) contains project +wide deviations. + +### Suppressed with Coverity Comments +To find the violation references in the source files run grep on the source code +with ( Assuming rule 8.4 violation; with justification in point 1 ): +``` +grep 'MISRA Ref 8.4.1' . -rI +``` + +#### Rule 8.4 + +_Ref 8.4.1_ + +- MISRA C:2012 Rule 8.4: A compatible declaration shall be visible when an + object or function with external linkage is defined. + This rule requires that a compatible declaration is made available + in a header file when an object with external linkage is defined. + pxCurrentTCB(s) is defined with external linkage but it is only + referenced from the assembly code in the port files. 
Therefore, adding + a declaration in header file is not useful as the assembly code will + still need to declare it separately. + +### MISRA configuration + +Copy below content to `misra.conf` to run Coverity on FreeRTOS-Kernel. + +``` +// MISRA C-2012 Rules +{ + version : "2.0", + standard : "c2012", + title: "Coverity MISRA Configuration", + deviations : [ + // Disable the following rules. + { + deviation: "Directive 4.8", + reason: "HeapRegion_t and HeapStats_t are used only in heap files but declared in portable.h which is included in multiple source files. As a result, these definitions appear in multiple source files where they are not used." + }, + { + deviation: "Directive 4.9", + reason: "FreeRTOS-Kernel is optimised to work on small micro-controllers. To achieve that, function-like macros are used." + }, + { + deviation: "Rule 1.2", + reason: "The __attribute__ tags are used via macros which are defined in port files." + }, + { + deviation: "Rule 3.1", + reason: "We post HTTP links in code comments which contain // inside comments blocks." + }, + { + deviation: "Rule 8.7", + reason: "API functions are not used by the library outside of the files they are defined; however, they must be externally visible in order to be used by an application." + }, + { + deviation: "Rule 11.5", + reason: "Allow casts from `void *`. List owner, pvOwner, is stored as `void *` and are cast to various types for use in functions." + } + ] +} +``` \ No newline at end of file diff --git a/include/task.h b/include/task.h index 8745a1d6481..52d5d40602b 100644 --- a/include/task.h +++ b/include/task.h @@ -84,7 +84,8 @@ * \ingroup Tasks */ struct tskTaskControlBlock; /* The old naming convention is used to prevent breaking kernel aware debuggers. */ -typedef struct tskTaskControlBlock * TaskHandle_t; +typedef struct tskTaskControlBlock * TaskHandle_t; +typedef const struct tskTaskControlBlock * ConstTaskHandle_t; /* * Defines the prototype to which the application task hook function must @@ -193,7 +194,7 @@ typedef enum * * \ingroup TaskUtils */ -#define tskNO_AFFINITY ( ( UBaseType_t ) -1U ) +#define tskNO_AFFINITY ( ( UBaseType_t ) -1 ) /** * task. h @@ -271,7 +272,7 @@ typedef enum #define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) /* Checks if core ID is valid. */ -#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUMBER_OF_CORES ) ) ) +#define taskVALID_CORE_ID( xCoreID ) ( ( ( ( ( BaseType_t ) 0 <= ( xCoreID ) ) && ( ( xCoreID ) < ( BaseType_t ) configNUMBER_OF_CORES ) ) ) ? 
( pdTRUE ) : ( pdFALSE ) ) /*----------------------------------------------------------- * TASK CREATION API @@ -746,7 +747,7 @@ typedef enum * \defgroup vTaskAllocateMPURegions vTaskAllocateMPURegions * \ingroup Tasks */ -void vTaskAllocateMPURegions( TaskHandle_t xTask, +void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION; /** @@ -1363,7 +1364,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; * } * } */ - UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ); + UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask ); #endif #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -3418,6 +3419,42 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC */ void vTaskYieldWithinAPI( void ); +/* + * This function is only intended for use when implementing a port of the scheduler + * and is only available when portCRITICAL_NESTING_IN_TCB is set to 1 or configNUMBER_OF_CORES + * is greater than 1. This function can be used in the implementation of portENTER_CRITICAL + * if port wants to maintain critical nesting count in TCB in single core FreeRTOS. + * It should be used in the implementation of portENTER_CRITICAL if port is running a + * multiple core FreeRTOS. + */ +void vTaskEnterCritical( void ); + +/* + * This function is only intended for use when implementing a port of the scheduler + * and is only available when portCRITICAL_NESTING_IN_TCB is set to 1 or configNUMBER_OF_CORES + * is greater than 1. This function can be used in the implementation of portEXIT_CRITICAL + * if port wants to maintain critical nesting count in TCB in single core FreeRTOS. + * It should be used in the implementation of portEXIT_CRITICAL if port is running a + * multiple core FreeRTOS. + */ +void vTaskExitCritical( void ); + +/* + * This function is only intended for use when implementing a port of the scheduler + * and is only available when configNUMBER_OF_CORES is greater than 1. This function + * should be used in the implementation of portENTER_CRITICAL_FROM_ISR if port is + * running a multiple core FreeRTOS. + */ +portBASE_TYPE vTaskEnterCriticalFromISR( void ); + +/* + * This function is only intended for use when implementing a port of the scheduler + * and is only available when configNUMBER_OF_CORES is greater than 1. This function + * should be used in the implementation of portEXIT_CRITICAL_FROM_ISR if port is + * running a multiple core FreeRTOS. + */ +void vTaskExitCriticalFromISR( portBASE_TYPE xSavedInterruptStatus ); + /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/tasks.c b/tasks.c index 8121b8cb493..5d8d3233ee3 100644 --- a/tasks.c +++ b/tasks.c @@ -58,18 +58,16 @@ #include #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */ -#if ( configUSE_PREEMPTION == 0 ) +#if ( configNUMBER_OF_CORES == 1 ) + #if ( configUSE_PREEMPTION == 0 ) /* If the cooperative scheduler is being used then a yield should not be * performed just because a higher priority task has been woken. */ - #define taskYIELD_IF_USING_PREEMPTION() -#else - #if ( configNUMBER_OF_CORES == 1 ) - #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #define taskYIELD_IF_USING_PREEMPTION() #else - #define taskYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() + #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() #endif -#endif +#endif /* if ( configNUMBER_OF_CORES == 1 ) */ /* Values that can be assigned to the ucNotifyState member of the TCB. 
*/ #define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */ @@ -137,7 +135,8 @@ /*-----------------------------------------------------------*/ - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + #if ( configNUMBER_OF_CORES == 1 ) + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ do { \ UBaseType_t uxTopPriority = uxTopReadyPriority; \ \ @@ -153,6 +152,7 @@ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ uxTopReadyPriority = uxTopPriority; \ } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */ + #endif /* if ( configNUMBER_OF_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -268,17 +268,11 @@ typedef BaseType_t TaskRunning_t; /* Indicates that the task is actively running but scheduled to yield. */ #define taskTASK_YIELDING ( TaskRunning_t ) ( -2 ) -/* taskTASK_IS_RUNNING - Returns pdTRUE if the task is actively running - * and not scheduled to yield. - * taskTASK_IS_YIELDING - Returns pdTRUE if the task is actively running - * but scheduled to yield. - */ +/* Returns pdTRUE if the task is actively running and not scheduled to yield. */ #if ( configNUMBER_OF_CORES == 1 ) - #define taskTASK_IS_RUNNING( pxTCB ) ( pxTCB == pxCurrentTCB ) - #define taskTASK_IS_YIELDING( pxTCB ) ( pdFALSE ) + #define taskTASK_IS_RUNNING( pxTCB ) ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) ) #else - #define taskTASK_IS_RUNNING( pxTCB ) ( ( pxTCB->xTaskRunState >= 0 ) && ( pxTCB->xTaskRunState < configNUMBER_OF_CORES ) ) - #define taskTASK_IS_YIELDING( pxTCB ) ( pxTCB->xTaskRunState == taskTASK_YIELDING ) + #define taskTASK_IS_RUNNING( pxTCB ) ( ( ( ( pxTCB )->xTaskRunState >= ( BaseType_t ) 0 ) && ( ( pxTCB )->xTaskRunState < ( BaseType_t ) configNUMBER_OF_CORES ) ) ? ( pdTRUE ) : ( pdFALSE ) ) #endif /* Indicates that the task is an Idle task. */ @@ -391,7 +385,10 @@ typedef tskTCB TCB_t; #if ( configNUMBER_OF_CORES == 1 ) portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; #else -portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ] = { NULL }; + /* MISRA Ref 8.4.1 [Declaration shall be visible] */ + /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */ + /* coverity[misra_c_2012_rule_8_4_violation] */ + portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ]; #define pxCurrentTCB xTaskGetCurrentTaskHandle() #endif @@ -434,8 +431,8 @@ PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE }; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; -PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ] = { NULL }; /**< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. */ +PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ]; /**< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. 
*/ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -460,14 +457,18 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ -PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0UL }; /**< Holds the value of a timer/counter the last time a task was switched in. */ -PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0UL }; /**< Holds the total amount of execution time as defined by the run time counter clock. */ +PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0U }; /**< Holds the value of a timer/counter the last time a task was switched in. */ +PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0U }; /**< Holds the total amount of execution time as defined by the run time counter clock. */ #endif #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) + +/* Do not move these variables to function scope as doing so prevents the + * code working with debuggers that need to remove the static qualifier. */ static StaticTask_t xIdleTCBBuffers[ configNUMBER_OF_CORES - 1 ]; static StackType_t xIdleTaskStackBuffers[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; + #endif /* #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */ /*lint -restore */ @@ -504,7 +505,7 @@ static BaseType_t prvCreateIdleTasks( void ); * Yields a core, or cores if multiple priorities are not allowed to run * simultaneously, to allow the task pxTCB to run. */ - static void prvYieldForTask( TCB_t * pxTCB ); + static void prvYieldForTask( const TCB_t * pxTCB ); #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ #if ( configNUMBER_OF_CORES > 1 ) @@ -680,13 +681,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif +#if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) + extern void vApplicationMinimalIdleHook( void ); +#endif /* #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) */ + /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) static void prvCheckForRunStateChange( void ) { UBaseType_t uxPrevCriticalNesting; - TCB_t * pxThisTCB; + const TCB_t * pxThisTCB; /* This should be skipped if called from an ISR. If the task on the current * core is no longer running, then vTaskSwitchContext() probably should @@ -779,14 +784,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) - static void prvYieldForTask( TCB_t * pxTCB ) + static void prvYieldForTask( const TCB_t * pxTCB ) { BaseType_t xLowestPriorityToPreempt; BaseType_t xCurrentCoreTaskPriority; BaseType_t xLowestPriorityCore = ( BaseType_t ) -1; - BaseType_t xYieldCount = 0; BaseType_t xCoreID; + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + BaseType_t xYieldCount = 0; + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + /* This must be called from a critical section. 
*/ configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U ); @@ -811,7 +819,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; xCurrentCoreTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority; /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. */ - if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) { xCurrentCoreTaskPriority = xCurrentCoreTaskPriority - 1; } @@ -825,7 +833,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt ) { #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) #endif { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -866,7 +874,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; } } - if( ( xYieldCount == 0 ) && ( xLowestPriorityCore >= 0 ) ) + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + if( ( xYieldCount == 0 ) && ( xLowestPriorityCore >= 0 ) ) + #else /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + if( xLowestPriorityCore >= 0 ) + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ { prvYieldCore( xLowestPriorityCore ); } @@ -893,7 +905,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; BaseType_t xDecrementTopPriority = pdTRUE; #if ( configUSE_CORE_AFFINITY == 1 ) - TCB_t * pxPreviousTCB = NULL; + const TCB_t * pxPreviousTCB = NULL; #endif #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) BaseType_t xPriorityDropped = pdFALSE; @@ -915,7 +927,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ), &pxCurrentTCBs[ xCoreID ]->xStateListItem ) == pdTRUE ) { - uxListRemove( &pxCurrentTCBs[ xCoreID ]->xStateListItem ); + ( void ) uxListRemove( &pxCurrentTCBs[ xCoreID ]->xStateListItem ); vListInsertEnd( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ), &pxCurrentTCBs[ xCoreID ]->xStateListItem ); } @@ -936,7 +948,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) { - List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] ); + const List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] ); const ListItem_t * pxEndMarker = listGET_END_MARKER( pxReadyList ); ListItem_t * pxIterator; @@ -946,7 +958,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; for( pxIterator = listGET_HEAD_ENTRY( pxReadyList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) ) { - TCB_t * pxTCB = listGET_LIST_ITEM_OWNER( pxIterator ); + TCB_t * pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) { @@ -966,7 +978,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) #endif { /* If the task is not being executed by any core swap it in. 
*/ @@ -984,7 +996,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) #endif { /* The task is already running on this core, mark it as scheduled. */ @@ -1022,23 +1034,35 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; * The scheduler should be able to select a task to run when uxCurrentPriority * is tskIDLE_PRIORITY. uxCurrentPriority is never decreased to value blow * tskIDLE_PRIORITY. */ - uxCurrentPriority--; + if( uxCurrentPriority > tskIDLE_PRIORITY ) + { + uxCurrentPriority--; + } + else + { + /* This function is called when idle task is not created. Break the + * loop to prevent uxCurrentPriority overrun. */ + break; + } } #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) { - if( xPriorityDropped != pdFALSE ) + if( xTaskScheduled == pdTRUE ) { - /* There may be several ready tasks that were being prevented from running because there was - * a higher priority task running. Now that the last of the higher priority tasks is no longer - * running, make sure all the other idle tasks yield. */ - BaseType_t x; - - for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ ) + if( xPriorityDropped != pdFALSE ) { - if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + /* There may be several ready tasks that were being prevented from running because there was + * a higher priority task running. Now that the last of the higher priority tasks is no longer + * running, make sure all the other idle tasks yield. */ + BaseType_t x; + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ ) { - prvYieldCore( x ); + if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + { + prvYieldCore( x ); + } } } } @@ -1047,70 +1071,73 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #if ( configUSE_CORE_AFFINITY == 1 ) { - if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) + if( xTaskScheduled == pdTRUE ) { - /* A ready task was just evicted from this core. See if it can be - * scheduled on any other core. */ - UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; - BaseType_t xLowestPriority = pxPreviousTCB->uxPriority; - BaseType_t xLowestPriorityCore = -1; - BaseType_t x; - - if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) { - xLowestPriority = xLowestPriority - 1; - } + /* A ready task was just evicted from this core. See if it can be + * scheduled on any other core. */ + UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; + BaseType_t xLowestPriority = ( BaseType_t ) pxPreviousTCB->uxPriority; + BaseType_t xLowestPriorityCore = -1; + BaseType_t x; - if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) - { - /* The ready task that was removed from this core is not excluded from it. - * Only look at the intersection of the cores the removed task is allowed to run - * on with the cores that the new task is excluded from. 
It is possible that the - * new task was only placed onto this core because it is excluded from another. - * Check to see if the previous task could run on one of those cores. */ - uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); - } - else - { - /* The ready task that was removed from this core is excluded from it. */ - } + if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) + { + xLowestPriority = xLowestPriority - 1; + } - uxCoreMap &= ( ( 1 << configNUMBER_OF_CORES ) - 1 ); + if( ( uxCoreMap & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) + { + /* The ready task that was removed from this core is not excluded from it. + * Only look at the intersection of the cores the removed task is allowed to run + * on with the cores that the new task is excluded from. It is possible that the + * new task was only placed onto this core because it is excluded from another. + * Check to see if the previous task could run on one of those cores. */ + uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); + } + else + { + /* The ready task that was removed from this core is excluded from it. */ + } - for( x = ( configNUMBER_OF_CORES - 1 ); x >= 0; x-- ) - { - UBaseType_t uxCore = ( UBaseType_t ) x; - BaseType_t xTaskPriority; + uxCoreMap &= ( ( 1U << configNUMBER_OF_CORES ) - 1U ); - if( ( uxCoreMap & ( 1 << uxCore ) ) != 0 ) + for( x = ( ( BaseType_t ) configNUMBER_OF_CORES - 1 ); x >= ( BaseType_t ) 0; x-- ) { - xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; + UBaseType_t uxCore = ( UBaseType_t ) x; + BaseType_t xTaskPriority; - if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + if( ( uxCoreMap & ( ( UBaseType_t ) 1U << uxCore ) ) != 0U ) { - xTaskPriority = xTaskPriority - ( BaseType_t ) 1; - } + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; - uxCoreMap &= ~( 1 << uxCore ); + if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) + { + xTaskPriority = xTaskPriority - ( BaseType_t ) 1; + } - if( ( xTaskPriority < xLowestPriority ) && - ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && - ( xYieldPendings[ uxCore ] == pdFALSE ) ) - { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) - #endif + uxCoreMap &= ~( ( UBaseType_t ) 1U << uxCore ); + + if( ( xTaskPriority < xLowestPriority ) && + ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && + ( xYieldPendings[ uxCore ] == pdFALSE ) ) { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = uxCore; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = ( BaseType_t ) uxCore; + } } } } - } - if( xLowestPriorityCore >= 0 ) - { - prvYieldCore( xLowestPriorityCore ); + if( xLowestPriorityCore >= 0 ) + { + prvYieldCore( xLowestPriorityCore ); + } } } } @@ -1167,7 +1194,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /* The memory used for the task's TCB and stack are passed into this * function - use them. */ pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. 
*/ - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ @@ -1226,7 +1253,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; * on the implementation of the port malloc function and whether or * not static allocation is being used. */ pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer; - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); /* Store the stack location in the TCB. */ pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; @@ -1285,14 +1312,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( pxTaskDefinition->puxStackBuffer != NULL ) { - /* Allocate space for the TCB. Where the memory comes from depends - * on the implementation of the port malloc function and whether or - * not static allocation is being used. */ pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); if( pxNewTCB != NULL ) { - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); /* Store the stack location in the TCB. */ pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; @@ -1369,7 +1393,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( pxNewTCB != NULL ) { - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); /* Allocate space for the stack used by the task being created. * The base of the stack memory stored in the TCB so the task can @@ -1398,7 +1422,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( pxNewTCB != NULL ) { - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); /* Store the stack location in the TCB. */ pxNewTCB->pxStack = pxStack; @@ -1665,7 +1689,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; /* Is this an idle task? */ - if( ( pxTaskCode == prvIdleTask ) || ( pxTaskCode == prvMinimalIdleTask ) ) + if( ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvIdleTask ) || ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvMinimalIdleTask ) ) { pxNewTCB->uxTaskAttributes |= taskATTRIBUTE_IS_IDLE; } @@ -1794,12 +1818,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } - if( ( pxNewTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + if( ( pxNewTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) { BaseType_t xCoreID; /* Check if a core is free. */ - for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ ) + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) { if( pxCurrentTCBs[ xCoreID ] == NULL ) { @@ -2194,7 +2218,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * suspended. 
*/ eReturn = eSuspended; - for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) { if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) { @@ -2567,7 +2591,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* If the task can no longer run on the core it was running, * request the core to yield. */ - if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) == 0 ) + if( ( uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) == 0U ) { prvYieldCore( xCoreID ); } @@ -2578,7 +2602,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Calculate the cores on which this task was not allowed to * run previously. */ - uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1 << configNUMBER_OF_CORES ) - 1 ); + uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1U << configNUMBER_OF_CORES ) - 1U ); /* Does the new core mask enables this task to run on any of the * previously not allowed cores? If yes, check if this task can be @@ -2602,9 +2626,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /*-----------------------------------------------------------*/ #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) - UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) + UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask ) { - TCB_t * pxTCB; + const TCB_t * pxTCB; UBaseType_t uxCoreAffinityMask; taskENTER_CRITICAL(); @@ -2715,7 +2739,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { BaseType_t x; - for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) { if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) { @@ -3127,12 +3151,13 @@ static BaseType_t prvCreateIdleTasks( void ) } /* Append the idle task number to the end of the name if there is space. */ - if( x < configMAX_TASK_NAME_LEN ) + if( x < ( BaseType_t ) configMAX_TASK_NAME_LEN ) { - cIdleName[ x++ ] = ( char ) xCoreID + '0'; + cIdleName[ x ] = ( char ) ( xCoreID + '0' ); + x++; /* And append a null character if there is space. */ - if( x < configMAX_TASK_NAME_LEN ) + if( x < ( BaseType_t ) configMAX_TASK_NAME_LEN ) { cIdleName[ x ] = '\0'; } @@ -4171,8 +4196,7 @@ BaseType_t xTaskIncrementTick( void ) BaseType_t xSwitchRequired = pdFALSE; #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) - UBaseType_t x; - BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE }; + BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE }; #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */ /* Called by the portable layer each time a tick interrupt occurs. 
@@ -4315,11 +4339,13 @@ BaseType_t xTaskIncrementTick( void ) } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - for( x = ( ( UBaseType_t ) 0 ); x < ( ( UBaseType_t ) configNUMBER_OF_CORES ); x++ ) + BaseType_t xCoreID; + + for( xCoreID = 0; xCoreID < ( ( BaseType_t ) configNUMBER_OF_CORES ); xCoreID++ ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ) ) > 1 ) { - xYieldRequiredForCore[ x ] = pdTRUE; + xYieldRequiredForCore[ xCoreID ] = pdTRUE; } else { @@ -4362,24 +4388,24 @@ BaseType_t xTaskIncrementTick( void ) } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - BaseType_t xCoreID; - xCoreID = portGET_CORE_ID(); + BaseType_t xCoreID, xCurrentCoreID; + xCurrentCoreID = portGET_CORE_ID(); - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUMBER_OF_CORES; x++ ) + for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) + if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) #endif { - if( ( xYieldRequiredForCore[ x ] != pdFALSE ) || ( xYieldPendings[ x ] != pdFALSE ) ) + if( ( xYieldRequiredForCore[ xCoreID ] != pdFALSE ) || ( xYieldPendings[ xCoreID ] != pdFALSE ) ) { - if( x == ( UBaseType_t ) xCoreID ) + if( xCoreID == xCurrentCoreID ) { xSwitchRequired = pdTRUE; } else { - prvYieldCore( x ); + prvYieldCore( xCoreID ); } } else @@ -5118,8 +5144,6 @@ void vTaskMissedYield( void ) #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) { - extern void vApplicationMinimalIdleHook( void ); - /* Call the user defined function from within the idle task. This * allows the application designer to add background functionality * without the overhead of a separate task. @@ -5147,7 +5171,7 @@ void vTaskMissedYield( void ) * */ -portTASK_FUNCTION( prvIdleTask, pvParameters ) +static portTASK_FUNCTION( prvIdleTask, pvParameters ) { /* Stop warnings. */ ( void ) pvParameters; @@ -5266,8 +5290,6 @@ portTASK_FUNCTION( prvIdleTask, pvParameters ) #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) { - extern void vApplicationMinimalIdleHook( void ); - /* Call the user defined function from within the idle task. This * allows the application designer to add background functionality * without the overhead of a separate task. @@ -5343,7 +5365,7 @@ portTASK_FUNCTION( prvIdleTask, pvParameters ) TCB_t * pxTCB; if( ( xIndex >= 0 ) && - ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) ) + ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) ) { pxTCB = prvGetTCBFromHandle( xTaskToSet ); configASSERT( pxTCB != NULL ); @@ -5363,7 +5385,7 @@ portTASK_FUNCTION( prvIdleTask, pvParameters ) TCB_t * pxTCB; if( ( xIndex >= 0 ) && - ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) ) + ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) ) { pxTCB = prvGetTCBFromHandle( xTaskToQuery ); pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ]; @@ -5382,7 +5404,7 @@ portTASK_FUNCTION( prvIdleTask, pvParameters ) #if ( portUSING_MPU_WRAPPERS == 1 ) void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, - const MemoryRegion_t * const xRegions ) + const MemoryRegion_t * const pxRegions ) { TCB_t * pxTCB; @@ -5390,7 +5412,7 @@ portTASK_FUNCTION( prvIdleTask, pvParameters ) * the calling task. 
*/ pxTCB = prvGetTCBFromHandle( xTaskToModify ); - vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 ); + vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), pxRegions, NULL, 0 ); } #endif /* portUSING_MPU_WRAPPERS */ @@ -6463,11 +6485,11 @@ static void prvResetNextTaskUnblockTime( void ) size_t x; /* Start by copying the entire string. */ - strcpy( pcBuffer, pcTaskName ); + ( void ) strcpy( pcBuffer, pcTaskName ); /* Pad the end of the string with spaces to ensure columns line up when * printed out. */ - for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ ) + for( x = strlen( pcBuffer ); x < ( size_t ) ( ( size_t ) configMAX_TASK_NAME_LEN - 1U ); x++ ) { pcBuffer[ x ] = ' '; } @@ -6727,14 +6749,18 @@ TickType_t uxTaskResetEventItemValue( void ) TaskHandle_t pvTaskIncrementMutexHeldCount( void ) { + TCB_t * pxTCB; + + pxTCB = pxCurrentTCB; + /* If xSemaphoreCreateMutex() is called before any tasks have been created * then pxCurrentTCB will be NULL. */ - if( pxCurrentTCB != NULL ) + if( pxTCB != NULL ) { - ( pxCurrentTCB->uxMutexesHeld )++; + ( pxTCB->uxMutexesHeld )++; } - return pxCurrentTCB; + return pxTCB; } #endif /* configUSE_MUTEXES */ @@ -6742,26 +6768,26 @@ TickType_t uxTaskResetEventItemValue( void ) #if ( configUSE_TASK_NOTIFICATIONS == 1 ) - uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWait, + uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) { uint32_t ulReturn; - configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES ); + configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES ); taskENTER_CRITICAL(); { /* Only block if the notification count is not already non-zero. */ - if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] == 0UL ) + if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL ) { /* Mark this task as waiting for a notification. 
*/ - pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION; + pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION; if( xTicksToWait > ( TickType_t ) 0 ) { prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); - traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWait ); + traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn ); /* All ports are written to allow a yield in a critical * section (some will yield immediately, others wait until the @@ -6791,18 +6817,18 @@ TickType_t uxTaskResetEventItemValue( void ) taskENTER_CRITICAL(); { - traceTASK_NOTIFY_TAKE( uxIndexToWait ); - ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ]; + traceTASK_NOTIFY_TAKE( uxIndexToWaitOn ); + ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ]; if( ulReturn != 0UL ) { if( xClearCountOnExit != pdFALSE ) { - pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = 0UL; + pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = 0UL; } else { - pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1; + pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ulReturn - ( uint32_t ) 1; } } else @@ -6810,7 +6836,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } - pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; + pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION; } taskEXIT_CRITICAL(); @@ -6822,7 +6848,7 @@ TickType_t uxTaskResetEventItemValue( void ) #if ( configUSE_TASK_NOTIFICATIONS == 1 ) - BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWait, + BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t * pulNotificationValue, @@ -6830,25 +6856,25 @@ TickType_t uxTaskResetEventItemValue( void ) { BaseType_t xReturn; - configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES ); + configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES ); taskENTER_CRITICAL(); { /* Only block if a notification is not already pending. */ - if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED ) + if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED ) { /* Clear bits in the task's notification value as bits may get * set by the notifying task or interrupt. This can be used to * clear the value to zero. */ - pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry; + pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry; /* Mark this task as waiting for a notification. */ - pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION; + pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION; if( xTicksToWait > ( TickType_t ) 0 ) { prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); - traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWait ); + traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn ); /* All ports are written to allow a yield in a critical * section (some will yield immediately, others wait until the @@ -6878,20 +6904,20 @@ TickType_t uxTaskResetEventItemValue( void ) taskENTER_CRITICAL(); { - traceTASK_NOTIFY_WAIT( uxIndexToWait ); + traceTASK_NOTIFY_WAIT( uxIndexToWaitOn ); if( pulNotificationValue != NULL ) { /* Output the current notification value, which may or may not * have changed. 
*/ - *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ]; + *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ]; } /* If ucNotifyValue is set then either the task never entered the * blocked state (because a notification was already pending) or the * task unblocked because of a notification. Otherwise the task * unblocked because of a timeout. */ - if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED ) + if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED ) { /* A notification was not received. */ xReturn = pdFALSE; @@ -6900,11 +6926,11 @@ TickType_t uxTaskResetEventItemValue( void ) { /* A notification was already pending or a notification was * received while the task was waiting. */ - pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit; + pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnExit; xReturn = pdTRUE; } - pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; + pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION; } taskEXIT_CRITICAL(); @@ -7421,8 +7447,9 @@ TickType_t uxTaskResetEventItemValue( void ) configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void ) { configRUN_TIME_COUNTER_TYPE ulReturn = 0; + BaseType_t i; - for( BaseType_t i = 0; i < configNUMBER_OF_CORES; i++ ) + for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ ) { ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter; } @@ -7439,6 +7466,7 @@ TickType_t uxTaskResetEventItemValue( void ) { configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0; + BaseType_t i; ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES; @@ -7448,7 +7476,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* Avoid divide by zero errors. */ if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 ) { - for( BaseType_t i = 0; i < configNUMBER_OF_CORES; i++ ) + for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ ) { ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter; } From b184cb7f7ba20bc1c7b96fba8c4e8622ae402117 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Wed, 17 May 2023 14:39:09 +0800 Subject: [PATCH 161/164] Smp dev merge main 0527 (#82) * Fix array-bounds compiler warning on gcc11+ in list.h (#580) listGET_OWNER_OF_NEXT_ENTRY computes `( pxConstList )->pxIndex->pxNext` after verifying that `( pxConstList )->pxIndex` points to `xListEnd`, which due to being a MiniListItem_t, can be shorter than a ListItem_t. Thus, `( pxConstList )->pxIndex` is a `ListItem_t *` that extends past the end of the `List_t` whose `xListEnd` it points to. This is fixed by accessing `pxNext` through a `MiniListItem_t` instead. * move the prototype for vApplicationIdleHook to task.h. 
(#600) Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update equal priority task preemption (#603) * vTaskResume and vTaskPrioritySet don't preempt equal priority task * Update vTaskResumeAll not to preempt task with equal priority * Fix in xTaskResumeFromISR * Update FreeRTOS/FreeRTOS build checks (#613) This is needed to be compatible with the refactoring done in this PR - https://github.com/FreeRTOS/FreeRTOS/pull/889 Signed-off-by: Gaurav Aggarwal Signed-off-by: Gaurav Aggarwal * Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent (#611) Allow ulTaskGetIdleRunTimeCounter and ulTaskGetIdleRunTimePercent to be used whenever configGENERATE_RUN_TIME_STATS is enabled, as this is the only requirement for these functions to work. * Fix some CMake documentation typos (#616) The quick start instructions for CMake mention the "master" git branch which has been replaced by "main" in the current repo. The main CMakeLists.txt documents how to integrate a custom port. Fix a typo in the suggested CMake code. * Added support of 64bit events. (#597) * Added support of 64bit even Signed-off-by: Cervenka Dusan * Added missing brackets Signed-off-by: Cervenka Dusan * Made proper name for tick macro. Signed-off-by: Cervenka Dusan * Improved macro evaluation Signed-off-by: Cervenka Dusan * Fixed missed port files + documentation Signed-off-by: Cervenka Dusan * Changes made on PR Signed-off-by: Cervenka Dusan * Fix macro definition. Signed-off-by: Cervenka Dusan * Formatted code with uncrustify Signed-off-by: Cervenka Dusan --------- Signed-off-by: Cervenka Dusan * Introduce portMEMORY_BARRIER for Microblaze port. (#621) The introduction of `portMEMORY_BARRIER` will ensure the places in the kernel use a barrier will work. For example, `xTaskResumeAll` has a memory barrier to ensure its correctness when compiled with optimization enabled. Without the barrier `xTaskResumeAll` can fail (e.g. start reading and writing to address 0 and/or infinite looping) when `xPendingReadyList` contains more than one task to restore. In `xTaskResumeAll` the compiler chooses to cache the `pxTCB` the first time through the loop for use in every subsequent loop. This is incorrect as the removal of `pxTCB->xEventListItem` will actually change the value of `pxTCB` if it was read again at the top of the loop. The barrier forces the compiler to read `pxTCB` again at the top of the loop. The compiler is operating correctly. The removal `pxTCB->xEventListItem` executes on a `List_t *` and `ListItem_t *`. This means that the compiler can assume that any `MiniListItem_t` values are unchanged by the loop (i.e. "strict-aliasing"). This allows the compiler to cache `pxTCB` as it is obtained via a `MiniListItem_t`. This is incorrect in this case because it is possible for a `ListItem_t *` to actually alias a `MiniListItem_t`. This is technically a "violation of aliasing rules" so we use the the barrier to disable the strict-aliasing optimization in this loop. 
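For context, a compiler-only memory barrier of the kind referenced above is usually nothing more than an empty inline-assembly statement with a "memory" clobber. The sketch below is illustrative only — whether the MicroBlaze port defines portMEMORY_BARRIER() exactly this way is an assumption — but it shows why such a barrier forces pxTCB to be re-read on every loop iteration instead of being cached in a register:

```c
/* Illustrative sketch, not the port's verbatim definition: an empty asm
 * statement with a "memory" clobber tells GCC that any memory location may
 * have changed, so values read before the barrier (such as pxTCB) must be
 * reloaded afterwards rather than reused from a cached register copy. */
#define portMEMORY_BARRIER()    __asm volatile ( "" ::: "memory" )
```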
* Do not call exit() on MSVC Port when calling vPortEndScheduler (#624) * make port exitable * correctly set xPortRunning to False * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update PR template to include checkbox for Unit Test related changes (#627) * Fix build failure introduced in PR #597 (#629) The PR #597 introduced a new config option configTICK_TYPE_WIDTH_IN_BITS which can be defined to one of the following: * TICK_TYPE_WIDTH_16_BITS - Tick type is 16 bit wide. * TICK_TYPE_WIDTH_32_BITS - Tick type is 32 bit wide. * TICK_TYPE_WIDTH_64_BITS - Tick type is 64 bit wide. Earlier we supported 16 and 32 bit width for tick type which was controlled using the config option configUSE_16_BIT_TICKS. The PR tried to maintain backward compatibility by honoring configUSE_16_BIT_TICKS. The backward compatibility did not work as expected though, as the macro configTICK_TYPE_WIDTH_IN_BITS was used before it was defined. This PR addresses it by ensuring that the macro configTICK_TYPE_WIDTH_IN_BITS is defined before it is used. Testing 1. configUSE_16_BIT_TICKS is defined to 0. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 2. configUSE_16_BIT_TICKS is defined to 1. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 3. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_16_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 4. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_32_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 5. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_64_BITS. ``` #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. ``` The testing was done for GCC/ARM_CM3 port which does not support 64 bit tick type. 6. Neither configUSE_16_BIT_TICKS nor configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. 
``` 7. Both configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` Related issue - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/628 Signed-off-by: Gaurav Aggarwal * Feature/fixing clang gnu compiler warnings (#620) * Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558) * Using single name definition for libraries everywhere. (#558) * Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571) * Removing compiler warnings for GNU and Clang. (#571) * Added in documentation on how to consume from a main project. Added default PORT selection for native POSIX and MINGW platforms. * Only adding freertos_config if it exists. Removing auto generation of it from a FREERTOS_CONFIG_FILE_DIRECTORY. * Fixing clang and gnu compiler warnings. * Adding in project information and how to compile for GNU/clang * Fixing compiler issue with unused variable - no need to declare variable. * Adding in compile warnings for linux builds that kernel is okay with using. * Fixing more extra-semi-stmt clang warnings. * Moving definition of hooks into header files if features are enabled. * Fixing formatting with uncrustify. * Fixing merge conflicts with main merge. * Fixing compiler errors due to merge issues and formatting. * Fixing Line feeds. * Adding 'portNORETURN' into portmacros.h. Other Updates based on PR request * Further clean-up of clang and clang-tidy issues. * Removing compiler specific pragmas from common c files. * Fixing missing lexicon entry and uncrustify formatting changes. * Resolving merge issue multiple defnitions of proto for prvIdleTask * Fixing formatting issues that are not covered by uncrustify. Use clang-tidy instead if you want this level of control. * More uncrustify formatting issues. * Fixing extra bracket in #if statement. --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * POSIX port fixes (#626) * Fix types in POSIX port Use TaskFunction_t and StackType_t as other ports do. * Fix portTICK_RATE_MICROSECONDS in POSIX port --------- Co-authored-by: Jacques GUILLOU Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Cortex-M35P: Add Cortex-M35P port (#631) * Cortex-M35P: Add Cortex-M35P port The Cortex-M35P support added to kernel. The port hasn't been validated yet with TF-M. Hence TF-M support is not included in this port. Signed-off-by: Devaraj Ranganna * Add portNORETURN to the newly added portmacro.h Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Introduced Github Status Badge for Unit Tests (#634) * Introduced Github Status Badge for Unit Tests * Github status badge to point to latest run * Github status badge UT points to latest results * Fixed URL for Github Status badge --------- Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Remove C99 requirement from CMake file (#633) * Remove C99 requirement from CMake file The kernel source is C89 compliant and does not need C99. 
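For context on what C89 compliance means in practice, here is a small, self-contained sketch (illustrative only, not kernel code) of the style the kernel follows - declarations before statements, and no loop counter declared inside the for statement - mirroring the loop-counter change in the tasks.c hunk earlier in this series:

```c
#include <stdio.h>

/* C89 style: all declarations come before the first statement in a block,
 * and the loop counter is not declared inside the for statement (C99-only). */
static unsigned long ulSum( const unsigned long * pulValues,
                            int xCount )
{
    unsigned long ulReturn = 0;
    int i;

    for( i = 0; i < xCount; i++ )
    {
        ulReturn += pulValues[ i ];
    }

    return ulReturn;
}

int main( void )
{
    unsigned long ulValues[ 3 ] = { 1UL, 2UL, 3UL };

    printf( "%lu\n", ulSum( ulValues, 3 ) );
    return 0;
}
```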
Signed-off-by: Gaurav Aggarwal * Explicitly set C89 requirement for kernel Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add Thread Local Storage (TLS) support using Picolibc functions (#343) * Pass top of stack to configINIT_TLS_BLOCK Picolibc wants to allocate the per-task TLS block within the stack segment, so it will need to modify the top of stack value. Pass the pxTopOfStack variable to make this explicit. Signed-off-by: Keith Packard * Move newlib-specific definitions to separate file This reduces the clutter in FreeRTOS.h caused by having newlib-specific macros present there. Signed-off-by: Keith Packard * Make TLS code depend only on configUSE_C_RUNTIME_TLS_SUPPORT Remove reference to configUSE_NEWLIB_REENTRANT as that only works when using newlib. configUSE_C_RUNTIME_TLS_SUPPORT is always set when configUSE_NEWLIB_REENTRANT is set, so using both was redundant in that case. Signed-off-by: Keith Packard * portable-ARC: Adapt ARC support to use generalized TLS support With generalized thread local storage (TLS) support present in the core, the two ARC ports need to have the changes to the TCB mirrored to them. Signed-off-by: Keith Packard * Add Thread Local Storage (TLS) support using Picolibc functions This patch provides definitions of the general TLS support macros in terms of the Picolibc TLS support functions. Picolibc is normally configured to use TLS internally for all variables that are intended to be task-local, so these changes are necessary for picolibc to work correctly with FreeRTOS. The picolibc helper functions rely on elements within the linker script to arrange the TLS data in memory and define some symbols. Applications wanting to use this mechanism will need changes in their linker script when migrating to picolibc. Signed-off-by: Keith Packard --------- Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Interrupt priority assert improvements for CM3/4/7 (#602) * Interrupt priority assert improvements for CM3/4/7 In the ARM_CM3, ARM_CM4, and ARM_CM7 ports, change the assertion that `configMAX_SYSCALL_INTERRUPT_PRIORITY` is nonzero to account for the number of priority bits implemented by the hardware. Change these ports to also use the lowest priority for PendSV and SysTick, ignoring `configKERNEL_INTERRUPT_PRIORITY`. * Remove not needed configKERNEL_INTERRUPT_PRIORITY define Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Introduced code coverage status badge (#635) * Introduced code coverage status badge * Trying to fix the URL checker issue * Fix URL check Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * added portPOINTER_SIZE_TYPE and SIZE_MAX definition to PIC24/dsPIC port (#636) * added portPOINTER_SIZE_TYPE definition to PIC24/dsPIC port * Added SIZE_MAX definition to PIC24/dsPIC33 * Fix TLS and stack alignment when using picolibc (#637) Both the TLS block and stack must be correctly aligned when using picolibc. The architecture stack alignment is represented by the portBYTE_ALIGNMENT_MASK and the TLS block alignment is provided by the Picolibc _tls_align() inline function for Picolibc version 1.8 and above. 
For older versions of Picolibc, we'll assume that the TLS block requires the same alignment as the stack. For downward growing stacks, this requires aligning the start of the TLS block to the maximum of the stack alignment and the TLS alignment. With this, both the TLS block and stack will now be correctly aligned. For upward growing stacks, the two areas must be aligned independently; the TLS block is aligned from the start of the stack, then the tls space is allocated, and then the stack is aligned above that. It's probably useful to know here that the linker ensures that variables within the TLS block are assigned offsets that match their alignment requirements. If the TLS block itself is correctly aligned, then everything within will also be. I have only tested the downward growing stack branch of this patch. Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Enable building the GCC Cortex-R5 port without an FPU (#586) * Ensure configUSE_TASK_FPU_SUPPORT option is set correctly If one does enable the FPU of the Cortex-R5 processor, then the GCC compiler will define the macro __ARM_FP. This can be used to ensure, that the configUSE_TASK_FPU_SUPPORT is set accordingly. * Enable the implementation of vPortTaskUsesFPU only if configUSE_TASK_FPU_SUPPORT is set to 1 * Remove error case in pxPortInitialiseStack The case of configUSE_TASK_FPU_SUPPORT is 0 is now handled * Enable access to FPU registers only if FPU is enabled * Make minor formating changes * Format ARM Cortex-R5 port * Address review comments from @ChristosZosi * Minor code review suggestions Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Christos Zosimidis Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Fix freertos_kernel cmake property, Posix Port (#640) * Fix freertos_kernel cmake property, Posix Port * Moves the `set_property()` call below the target definition in top level CMakeLists file * Corrects billion value from `ULL` suffix (not C90 compliant) to `UL` suffix with cast to uint64_t * Add blank line to CMakeLists.txt * Add missing FreeRTOS+ defines * Run kernel demos and unit tests for PR changes (#645) * Run kernel demos and unit tests for PR changes Kernel demos check builds multiple demos from FreeRTOS/FreeRTOS and unit tests check runs unit tests in FreeRTOS/FreeRTOS. Both of these checks currently use main branch of FreeRTOS-Kernel. This commits updates these checks to use the changes in the PR. Signed-off-by: Gaurav Aggarwal * Do not specify PR SHA explicitly as that is default Signed-off-by: Gaurav Aggarwal * Remove explicit PR SHA from kernel checks Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add functions to get the buffers of statically created objects (#641) Added various ...GetStaticBuffer() functions to get the buffers of statically created objects. 
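As a usage sketch (illustrative only, assuming a FreeRTOS project and a task that was created earlier with xTaskCreateStatic()), the task variant added here can be called as follows:

```c
#include "FreeRTOS.h"
#include "task.h"

void vQueryTaskBuffers( TaskHandle_t xTask )
{
    StackType_t * puxStackBuffer = NULL;
    StaticTask_t * pxTaskBuffer = NULL;

    /* xTaskGetStaticBuffers() returns pdTRUE and fills in the pointers when
     * the task's stack and TCB storage were supplied by the application,
     * i.e. the task was created with xTaskCreateStatic(). */
    if( xTaskGetStaticBuffers( xTask, &puxStackBuffer, &pxTaskBuffer ) == pdTRUE )
    {
        /* puxStackBuffer and pxTaskBuffer now point at the caller-supplied
         * stack buffer and StaticTask_t structure respectively. */
    }
}
```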
--------- Co-authored-by: Paul Bartell Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Cortex-M Assert when NVIC implements 8 PRIO bits (#639) * Cortex-M Assert when NVIC implements 8 PRIO bits * Fix CM3 ports * Fix ARM_CM3_MPU * Fix ARM CM3 * Fix ARM_CM4_MPU * Fix ARM_CM4 * Fix GCC ARM_CM7 * Fix IAR ARM ports * Uncrustify changes * Fix MikroC_ARM_CM4F port * Fix MikroC_ARM_CM4F port-(2) * Fix RVDS ARM ports * Revert changes for Tasking/ARM_CM4F port * Revert changes for Tasking/ARM_CM4F port-(2) * Update port.c Fix GCC/ARM_CM4F port * Update port.c * update GCC\ARM_CM4F port * update port.c * Assert to check configMAX_SYSCALL_INTERRUPT_PRIORITY is set to higher priority * Fix merge error: remove duplicate code * Fix typos --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Ubuntu * Remove C90 requirement from CMakeLists (#649) This is needed as it is breaking projects - https://forums.freertos.org/t/freertos-gcc-cmake/16984 We will re-evaluate and accordingly add this later. Signed-off-by: Gaurav Aggarwal * Only add alignment padding when needed (#650) Heap 4 and Heap 5 add some padding to ensure that the allocated blocks are always aligned to portBYTE_ALIGNMENT bytes. The code until now was adding padding always even if the resulting block was already aligned. This commits updates the code to only add padding if the resulting block is not aligned. Signed-off-by: Gaurav Aggarwal * add a missing comma (#651) * fix conversion warning (#658) FreeRTOS-Kernel/portable/GCC/ARM_CM4F/port.c:399:41: error: conversion from 'uint32_t' {aka 'long unsigned int'} to 'uint8_t' {aka 'unsigned char'} may change value [-Werror=conversion] Signed-off-by: Vo Trung Chi * ARMv7M: Adjust implemented priority bit assertions (#665) Adjust assertions related to the CMSIS __NVIC_PRIO_BITS and FreeRTOS configPRIO_BITS configuration macros such that these macros specify the minimum number of implemented priority bits supported by a config build rather than the exact number of implemented priority bits. Related to Qemu issue #1122 * Format portmacro.h in arm CM0 ports * portable/ARM_CM0: Add xPortIsInsideInterrupt Add missing xPortIsInsideInterrupt function to Cortex-M0 port. * tree-wide: Unify formatting of __cplusplus ifdefs * Paranthesize expression-like macro (#668) * Updated tasks.c checks for scheduler suspension (#670) This commit updates the checks for the variable uxSchedulerSuspended in tasks.c module to use a uniform format. Signed-off-by: Sudeep Mohanty * Fix cast alignment warning (#669) * Fix cast alignment warning Without this change, the code produces the following warning when compiled with `-Wcast-align` flag: ``` cast increases required alignment of target type ``` Signed-off-by: Gaurav Aggarwal * Align StackSize and StackAddress for macOS (#674) * Armv8-M (except Cortex-M23) interrupt priority checking (#673) * Armv8-M: Formatting changes Signed-off-by: Devaraj Ranganna * Armv8-M: Add support for interrupt priority check FreeRTOS provides `FromISR` system calls which can be called directly from interrupt service routines. It is crucial that the priority of these ISRs is set to same or lower value (numerically higher) than that of `configMAX_SYSCALL_INTERRUPT_PRIORITY`. For more information refer to https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html. 
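As an illustration of that requirement (the device header and IRQ name below are placeholders, and the values depend on the device's implemented priority bits and on FreeRTOSConfig.h), an interrupt whose handler calls ...FromISR() APIs could be configured like this with CMSIS:

```c
#include "device.h" /* Placeholder for the vendor's CMSIS device header. */

/* Suppose the device implements 3 priority bits and FreeRTOSConfig.h sets
 * configMAX_SYSCALL_INTERRUPT_PRIORITY to ( 5 << ( 8 - 3 ) ).  An interrupt
 * whose handler calls ...FromISR() APIs must then be given a CMSIS priority
 * of 5 or numerically higher (i.e. the same or logically lower priority). */
void vSetupUartInterrupt( void )
{
    NVIC_SetPriority( UART0_IRQn, 5 ); /* UART0_IRQn is a placeholder name. */
    NVIC_EnableIRQ( UART0_IRQn );
}
```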
Add a check to trigger an assert when an ISR with priority higher (numerically lower) than `configMAX_SYSCALL_INTERRUPT_PRIORITY` calls `FromISR` system calls if `configASSERT` macro is defined. In addition, add a config option `configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK` to disable interrupt priority check while running on QEMU. Based on the discussion https://gitlab.com/qemu-project/qemu/-/issues/1122, The interrupt priority bits in QEMU do not match the real hardware. Therefore the assert that checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. The config option `configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the `FreeRTOSConfig.h` for QEMU targets. Signed-off-by: Devaraj Ranganna * Use SHPR2 for calculating interrupt priority bits This removes the dependency on the secure software to mark the interrupt as non-secure. Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Use the extended movx instruction instead of mov (#676) The following is from the MSP430X instruction set - ``` MOVX.W Move source word to destination word. The source operand is copied to the destination. The source operand is not affected. Both operands may be located in the full address space. ``` The movx instruction allows both the operands to be located in the full address space and therefore, works with large data model as well. Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Signed-off-by: Cervenka Dusan Signed-off-by: Devaraj Ranganna Signed-off-by: Keith Packard Signed-off-by: Vo Trung Chi Signed-off-by: Sudeep Mohanty Co-authored-by: Archit Gupta <71798289+archigup@users.noreply.github.com> Co-authored-by: tcpluess Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Chris Copeland Co-authored-by: David J. 
Fiddes <35607151+davefiddes@users.noreply.github.com> Co-authored-by: Dusan Cervenka Co-authored-by: bbain <16752579+bbain@users.noreply.github.com> Co-authored-by: Ju1He1 <93189163+Ju1He1@users.noreply.github.com> Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> Co-authored-by: phelter Co-authored-by: jacky309 Co-authored-by: Jacques GUILLOU Co-authored-by: Devaraj Ranganna Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Joseph Julicher Co-authored-by: Paul Bartell Co-authored-by: Christos Zosimidis Co-authored-by: Kody Stribrny <89810515+kstribrnAmzn@users.noreply.github.com> Co-authored-by: Holden Co-authored-by: Darian <32921628+Dazza0@users.noreply.github.com> Co-authored-by: Ubuntu Co-authored-by: Nicolas Co-authored-by: Vo Trung Chi Co-authored-by: Sudeep Mohanty <91244425+sudeep-mohanty@users.noreply.github.com> Co-authored-by: Monika Singh <108652024+moninom1@users.noreply.github.com> --- .github/lexicon.txt | 1 + include/newlib-freertos.h | 2 +- portable/ARMv8M/non_secure/port.c | 196 ++++++++++++ .../portable/GCC/ARM_CM23/portmacro.h | 23 +- .../portable/GCC/ARM_CM23_NTZ/portmacro.h | 23 +- .../portable/GCC/ARM_CM33/portmacro.h | 11 +- .../portable/GCC/ARM_CM33_NTZ/portmacro.h | 11 +- .../portable/GCC/ARM_CM35P/portmacro.h | 11 +- .../portable/GCC/ARM_CM55/portmacro.h | 11 +- .../portable/GCC/ARM_CM85/portmacro.h | 11 +- .../portable/IAR/ARM_CM23/portmacro.h | 21 +- .../portable/IAR/ARM_CM23_NTZ/portmacro.h | 21 +- .../portable/IAR/ARM_CM33/portmacro.h | 11 +- .../portable/IAR/ARM_CM33_NTZ/portmacro.h | 11 +- .../portable/IAR/ARM_CM35P/portmacro.h | 11 +- .../portable/IAR/ARM_CM55/portmacro.h | 11 +- .../portable/IAR/ARM_CM85/portmacro.h | 11 +- portable/ARMv8M/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/CCS/ARM_CM3/portmacro.h | 16 +- portable/CCS/ARM_CM4F/portmacro.h | 16 +- portable/CCS/MSP430X/portext.asm | 4 +- portable/CodeWarrior/ColdFire_V1/portmacro.h | 8 +- portable/CodeWarrior/ColdFire_V2/portmacro.h | 8 +- portable/GCC/ARM7_AT91FR40008/portmacro.h | 8 +- portable/GCC/ARM7_AT91SAM7S/portmacro.h | 8 +- portable/GCC/ARM7_LPC2000/portmacro.h | 8 +- portable/GCC/ARM7_LPC23xx/portmacro.h | 8 +- portable/GCC/ARM_CA53_64_BIT/portmacro.h | 13 +- portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h | 13 +- portable/GCC/ARM_CA9/portmacro.h | 13 +- portable/GCC/ARM_CM0/portmacro.h | 1 - portable/GCC/ARM_CM23/non_secure/port.c | 196 ++++++++++++ portable/GCC/ARM_CM23/non_secure/portmacro.h | 23 +- .../GCC/ARM_CM23/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CM23_NTZ/non_secure/port.c | 196 ++++++++++++ .../GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 23 +- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CM3/portmacro.h | 16 +- portable/GCC/ARM_CM33/non_secure/port.c | 196 ++++++++++++ portable/GCC/ARM_CM33/non_secure/portmacro.h | 11 +- .../GCC/ARM_CM33/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CM33_NTZ/non_secure/port.c | 196 ++++++++++++ .../GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 11 +- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CM35P/non_secure/port.c | 196 ++++++++++++ portable/GCC/ARM_CM35P/non_secure/portmacro.h | 11 +- .../ARM_CM35P/non_secure/portmacrocommon.h | 285 
++++++++++-------- portable/GCC/ARM_CM35P_NTZ/non_secure/port.c | 196 ++++++++++++ .../GCC/ARM_CM35P_NTZ/non_secure/portmacro.h | 11 +- .../non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CM3_MPU/portmacro.h | 21 +- portable/GCC/ARM_CM4F/portmacro.h | 16 +- portable/GCC/ARM_CM55/non_secure/port.c | 196 ++++++++++++ portable/GCC/ARM_CM55/non_secure/portmacro.h | 11 +- .../GCC/ARM_CM55/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CM55_NTZ/non_secure/port.c | 196 ++++++++++++ .../GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 11 +- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CM7/r0p1/portmacro.h | 16 +- portable/GCC/ARM_CM85/non_secure/port.c | 196 ++++++++++++ portable/GCC/ARM_CM85/non_secure/portmacro.h | 11 +- .../GCC/ARM_CM85/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CM85_NTZ/non_secure/port.c | 196 ++++++++++++ .../GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 11 +- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/GCC/ARM_CRx_No_GIC/portmacro.h | 7 +- portable/GCC/ATMega323/portmacro.h | 8 +- portable/GCC/AVR32_UC3/portmacro.h | 8 +- portable/GCC/CORTUS_APS3/portmacro.h | 8 +- portable/GCC/ColdFire_V2/portmacro.h | 8 +- portable/GCC/H8S2329/portmacro.h | 8 +- portable/GCC/HCS12/portmacro.h | 8 +- portable/GCC/IA32_flat/portmacro.h | 6 +- portable/GCC/MSP430F449/portmacro.h | 8 +- portable/GCC/MicroBlaze/portmacro.h | 8 +- portable/GCC/MicroBlazeV8/portmacro.h | 8 +- portable/GCC/MicroBlazeV9/portmacro.h | 8 +- portable/GCC/NiosII/portmacro.h | 8 +- portable/GCC/PPC405_Xilinx/portmacro.h | 8 +- portable/GCC/PPC440_Xilinx/portmacro.h | 8 +- portable/GCC/RISC-V/portmacro.h | 8 +- portable/GCC/RX100/portmacro.h | 8 +- portable/GCC/RX200/portmacro.h | 8 +- portable/GCC/RX600/portmacro.h | 8 +- portable/GCC/RX600v2/portmacro.h | 8 +- portable/GCC/RX700v3_DPFPU/portmacro.h | 16 +- portable/GCC/STR75x/portmacro.h | 8 +- portable/GCC/TriCore_1782/portmacro.h | 8 +- portable/IAR/78K0R/portmacro.h | 8 +- portable/IAR/ARM_CA5_No_GIC/portmacro.h | 21 +- portable/IAR/ARM_CA9/portmacro.h | 21 +- portable/IAR/ARM_CM23/non_secure/port.c | 196 ++++++++++++ portable/IAR/ARM_CM23/non_secure/portmacro.h | 21 +- .../IAR/ARM_CM23/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CM23_NTZ/non_secure/port.c | 196 ++++++++++++ .../IAR/ARM_CM23_NTZ/non_secure/portmacro.h | 21 +- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CM3/portmacro.h | 16 +- portable/IAR/ARM_CM33/non_secure/port.c | 196 ++++++++++++ portable/IAR/ARM_CM33/non_secure/portmacro.h | 11 +- .../IAR/ARM_CM33/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CM33_NTZ/non_secure/port.c | 196 ++++++++++++ .../IAR/ARM_CM33_NTZ/non_secure/portmacro.h | 11 +- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CM35P/non_secure/port.c | 196 ++++++++++++ portable/IAR/ARM_CM35P/non_secure/portmacro.h | 11 +- .../ARM_CM35P/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CM35P_NTZ/non_secure/port.c | 196 ++++++++++++ .../IAR/ARM_CM35P_NTZ/non_secure/portmacro.h | 11 +- .../non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CM4F/portmacro.h | 16 +- portable/IAR/ARM_CM55/non_secure/port.c | 196 ++++++++++++ portable/IAR/ARM_CM55/non_secure/portmacro.h | 11 +- .../IAR/ARM_CM55/non_secure/portmacrocommon.h | 285 ++++++++++-------- 
portable/IAR/ARM_CM55_NTZ/non_secure/port.c | 196 ++++++++++++ .../IAR/ARM_CM55_NTZ/non_secure/portmacro.h | 11 +- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CM7/r0p1/portmacro.h | 16 +- portable/IAR/ARM_CM85/non_secure/port.c | 196 ++++++++++++ portable/IAR/ARM_CM85/non_secure/portmacro.h | 11 +- .../IAR/ARM_CM85/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CM85_NTZ/non_secure/port.c | 196 ++++++++++++ .../IAR/ARM_CM85_NTZ/non_secure/portmacro.h | 11 +- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 285 ++++++++++-------- portable/IAR/ARM_CRx_No_GIC/portmacro.h | 7 +- portable/IAR/ATMega323/portmacro.h | 8 +- portable/IAR/AVR32_UC3/portmacro.h | 8 +- portable/IAR/AtmelSAM7S64/portmacro.h | 8 +- portable/IAR/AtmelSAM9XE/portmacro.h | 8 +- portable/IAR/LPC2000/portmacro.h | 8 +- portable/IAR/RISC-V/portmacro.h | 8 +- portable/IAR/RL78/portmacro.h | 10 +- portable/IAR/RX100/portmacro.h | 8 +- portable/IAR/RX600/portmacro.h | 8 +- portable/IAR/RX700v3_DPFPU/portmacro.h | 16 +- portable/IAR/RXv2/portmacro.h | 8 +- portable/IAR/STR71x/portmacro.h | 8 +- portable/IAR/STR75x/portmacro.h | 8 +- portable/IAR/STR91x/portmacro.h | 8 +- portable/IAR/V850ES/portmacro.h | 8 +- portable/MPLAB/PIC24_dsPIC/portmacro.h | 8 +- portable/MPLAB/PIC32MEC14xx/portmacro.h | 8 +- portable/MPLAB/PIC32MX/portmacro.h | 8 +- portable/MPLAB/PIC32MZ/portmacro.h | 8 +- portable/MikroC/ARM_CM4F/portmacro.h | 16 +- .../Tern_EE/large_untested/portmacro.h | 8 +- portable/Paradigm/Tern_EE/small/portmacro.h | 8 +- portable/RVDS/ARM_CA9/portmacro.h | 8 +- portable/Renesas/RX100/portmacro.h | 8 +- portable/Renesas/RX200/portmacro.h | 8 +- portable/Renesas/RX600/portmacro.h | 8 +- portable/Renesas/RX600v2/portmacro.h | 8 +- portable/Renesas/RX700v3_DPFPU/portmacro.h | 16 +- portable/Renesas/SH2A_FPU/portmacro.h | 8 +- portable/Tasking/ARM_CM4F/portmacro.h | 16 +- .../ThirdParty/CDK/T-HEAD_CK802/portmacro.h | 11 +- portable/ThirdParty/GCC/ARC_v1/portmacro.h | 16 +- portable/ThirdParty/GCC/ATmega/portmacro.h | 8 +- portable/ThirdParty/GCC/Posix/port.c | 9 + portable/ThirdParty/GCC/Posix/portmacro.h | 8 +- .../ThirdParty/GCC/RP2040/include/portmacro.h | 16 +- .../GCC/RP2040/include/rp2040_config.h | 8 +- .../GCC/Xtensa_ESP32/include/port_systick.h | 8 +- .../GCC/Xtensa_ESP32/include/xtensa_config.h | 17 +- portable/ThirdParty/XCC/Xtensa/portmacro.h | 8 +- .../ThirdParty/XCC/Xtensa/xtensa_config.h | 8 +- portable/oWatcom/16BitDOS/Flsh186/portmacro.h | 8 +- portable/oWatcom/16BitDOS/PC/portmacro.h | 8 +- stream_buffer.c | 16 +- tasks.c | 28 +- 170 files changed, 8284 insertions(+), 3220 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 78c911c5d91..90452c54c56 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1611,6 +1611,7 @@ putchar puxstackbuffer puxvariabletoincrement pv +pvallocatedmemory pvbuffer pvcallbackref pvcomparand diff --git a/include/newlib-freertos.h b/include/newlib-freertos.h index 497ca529990..a65e62e8ef7 100644 --- a/include/newlib-freertos.h +++ b/include/newlib-freertos.h @@ -52,7 +52,7 @@ #endif #ifndef configSET_TLS_BLOCK - #define configSET_TLS_BLOCK( xTLSBlock ) _impure_ptr = &( xTLSBlock ) + #define configSET_TLS_BLOCK( xTLSBlock ) ( _impure_ptr = &( xTLSBlock ) ) #endif #ifndef configDEINIT_TLS_BLOCK diff --git a/portable/ARMv8M/non_secure/port.c b/portable/ARMv8M/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/ARMv8M/non_secure/port.c +++ b/portable/ARMv8M/non_secure/port.c @@ 
-94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. 
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. 
*/ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index c6dad99857c..746f734b8ac 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -48,12 +48,17 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -61,12 +66,14 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index c6dad99857c..746f734b8ac 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -48,12 +48,17 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif /*-----------------------------------------------------------*/ @@ -61,12 +66,14 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 4fe8c59147a..19da9b0ecfe 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -60,8 +65,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 4fe8c59147a..19da9b0ecfe 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
*/ @@ -60,8 +65,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h index 33bfb283461..cc643459770 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -60,8 +65,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index adb47d8420f..c9bad40cf98 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -65,8 +70,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index fec6923394c..c45dd21c29e 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,10 +54,15 @@ * Architecture specifics. 
*/ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -65,8 +70,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h index d845ac1caa0..9cf0e87fbc8 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -48,11 +48,16 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __root +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -60,8 +65,8 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h index d845ac1caa0..9cf0e87fbc8 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -48,11 +48,16 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __root +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. 
*/ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -60,8 +65,8 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h index b6df20eb88f..380768fc03b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,6 +49,7 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ @@ -57,6 +58,10 @@ #endif /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h index b6df20eb88f..815dca0861a 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h index a0efc1f9dcf..46bc4e24b56 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h index a3b510e282c..7829ee6186a 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif @@ -76,8 +81,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h index cfaae813eac..3b51cb5ff46 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. 
*/ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif @@ -76,8 +81,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/ARMv8M/non_secure/portmacrocommon.h +++ b/portable/ARMv8M/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
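For context, the three configENABLE_* switches checked above must come from the application's FreeRTOSConfig.h. A minimal, purely illustrative fragment for a non-secure ARMv8-M build could look like the sketch below; the values shown are application choices, not part of this change.

/* Illustrative FreeRTOSConfig.h fragment - values are application specific. */
#define configENABLE_FPU          1   /* Save and restore FP context for tasks that use it. */
#define configENABLE_MPU          0   /* Build without the MPU wrappers. */
#define configENABLE_TRUSTZONE    1   /* Non-secure build that calls into the secure side. */
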
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
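The hunk above is re-indentation only, but the tick definitions it carries deserve a worked example: with configTICK_RATE_HZ at 1000, portTICK_PERIOD_MS is 1, so 250 ms is 250 ticks, and portMAX_DELAY (0xffffffffUL for the 32-bit tick type) is the longest possible block time. A small, hypothetical task showing the idiom (not part of this change):

#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical task illustrating TickType_t, portTICK_PERIOD_MS and
 * portMAX_DELAY usage. */
static void prvExampleTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Block until another task or ISR sends a notification.
         * portMAX_DELAY is the largest value a TickType_t can hold. */
        ( void ) ulTaskNotifyTake( pdTRUE, portMAX_DELAY );

        /* 250 ms converted to ticks; equivalent to 250 / portTICK_PERIOD_MS
         * when configTICK_RATE_HZ is 1000. */
        vTaskDelay( pdMS_TO_TICKS( 250 ) );
    }
}
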
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
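Spelling out the region arithmetic above for the default configTOTAL_MPU_REGIONS of 8: regions 0 to 4 are the fixed kernel regions (privileged flash, unprivileged flash, unprivileged syscalls, privileged RAM, stack), regions 5 to 7 are the configurable per-task regions, and portTOTAL_NUM_REGIONS works out to 4 entries per TCB (three configurable regions plus the stack region). The same arithmetic as compile-time checks, assuming C11 _Static_assert is available; the EXAMPLE_ names are illustrative only:

/* Worked check of the default 8-region arithmetic - illustrative only. */
#define EXAMPLE_TOTAL_MPU_REGIONS           ( 8UL )
#define EXAMPLE_FIRST_CONFIGURABLE_REGION   ( 5UL )
#define EXAMPLE_LAST_CONFIGURABLE_REGION    ( EXAMPLE_TOTAL_MPU_REGIONS - 1UL )
#define EXAMPLE_NUM_CONFIGURABLE_REGIONS    ( ( EXAMPLE_LAST_CONFIGURABLE_REGION - EXAMPLE_FIRST_CONFIGURABLE_REGION ) + 1 )
#define EXAMPLE_TOTAL_NUM_REGIONS           ( EXAMPLE_NUM_CONFIGURABLE_REGIONS + 1 )

_Static_assert( EXAMPLE_LAST_CONFIGURABLE_REGION == 7UL, "regions 5..7 are configurable" );
_Static_assert( EXAMPLE_NUM_CONFIGURABLE_REGIONS == 3UL, "three configurable per-task regions" );
_Static_assert( EXAMPLE_TOTAL_NUM_REGIONS == 4UL, "three configurable regions plus the stack region" );
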
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
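MPURegionSettings_t above stores raw MPU_RBAR/MPU_RLAR words per region. The sketch below shows how the attribute macros combine into such a pair for an unprivileged, read-write, never-execute RAM region. It assumes this header is visible (normally via FreeRTOS.h); the RLAR bit positions (limit in bits 31:5, attribute index in bits 3:1, enable in bit 0) come from the ARMv8-M architecture rather than from this change, and the buffer and function names are hypothetical.

/* Illustrative only - shows how the RBAR attribute macros above compose.
 * ucExampleBuffer is a hypothetical, 32-byte aligned region. */
static uint8_t ucExampleBuffer[ 512 ] __attribute__( ( aligned( 32 ) ) );

static MPURegionSettings_t xExampleRegion( void )
{
    MPURegionSettings_t xSettings;
    uint32_t ulBase = ( uint32_t ) ucExampleBuffer;
    uint32_t ulLimit = ulBase + sizeof( ucExampleBuffer ) - 1UL;

    /* RBAR: base address (32-byte granule) plus the shareability, access
     * permission and execute-never attributes defined above. */
    xSettings.ulRBAR = ( ulBase & ~0x1FUL ) |
                       portMPU_REGION_NON_SHAREABLE |
                       portMPU_REGION_READ_WRITE |
                       portMPU_REGION_EXECUTE_NEVER;

    /* RLAR: limit address in bits 31:5, MAIR attribute index 0 (for example
     * programmed with portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE) in
     * bits 3:1, and the region enable bit. */
    xSettings.ulRLAR = ( ulLimit & ~0x1FUL ) |
                       ( 0UL << 1UL ) | /* Attribute index 0. */
                       1UL;             /* Region enable. */

    return xSettings;
}
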
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
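The scheduler-utility hunk above only re-wraps portEND_SWITCHING_ISR() in a multi-line do { } while( 0 ), but the reason the macro exists is worth a usage sketch: pending PendSV from an ISR defers the context switch until the interrupt returns. The handler and semaphore names below are hypothetical and not part of this change.

#include "FreeRTOS.h"
#include "semphr.h"

/* Hypothetical binary semaphore given from an interrupt. */
static SemaphoreHandle_t xExampleSemaphore;

void vExampleIRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Unblock the task waiting on the semaphore; the FromISR call reports
     * whether the unblocked task has a higher priority than the one the
     * interrupt preempted. */
    ( void ) xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

    /* Expands to portEND_SWITCHING_ISR(): if a switch is required it sets
     * portNVIC_PENDSVSET_BIT in portNVIC_INT_CTRL_REG so PendSV runs the
     * scheduler once this handler exits.  The do { } while( 0 ) wrapper keeps
     * the macro safe inside an un-braced if/else. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
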
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/CCS/ARM_CM3/portmacro.h b/portable/CCS/ARM_CM3/portmacro.h index 7ac1d709caf..9c405d4c87f 100644 --- a/portable/CCS/ARM_CM3/portmacro.h +++ b/portable/CCS/ARM_CM3/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -166,8 +168,10 @@ /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/CCS/ARM_CM4F/portmacro.h b/portable/CCS/ARM_CM4F/portmacro.h index 34988c223f5..64c702c941c 100644 --- a/portable/CCS/ARM_CM4F/portmacro.h +++ b/portable/CCS/ARM_CM4F/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
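The privilege macros above are typically used in wrapper functions placed in the unprivileged-callable (system calls) region - per the comment above, the SVC raised by portRAISE_PRIVILEGE() is only honoured from there. A hedged sketch of the pattern, assuming configENABLE_MPU is 1 so the macros expand to real code; the wrapped call and wrapper name are hypothetical.

#include "FreeRTOS.h"

/* Hypothetical privileged-only routine being wrapped. */
extern void vExampleKernelCall( void );

/* Sketch only: must be linked into the unprivileged-callable section for
 * the SVC raised below to take effect. */
void vExamplePrivilegedWrapper( void )
{
    BaseType_t xRunningPrivileged = portIS_PRIVILEGED();

    if( xRunningPrivileged == pdFALSE )
    {
        portRAISE_PRIVILEGE(); /* SVC into the kernel to gain privilege. */
    }

    vExampleKernelCall();

    if( xRunningPrivileged == pdFALSE )
    {
        portRESET_PRIVILEGE(); /* Set CONTROL bit 0 to return to unprivileged. */
    }
}
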
@@ -160,8 +162,10 @@ /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/CCS/MSP430X/portext.asm b/portable/CCS/MSP430X/portext.asm index 9ebfa995016..9fe306e9401 100644 --- a/portable/CCS/MSP430X/portext.asm +++ b/portable/CCS/MSP430X/portext.asm @@ -48,7 +48,7 @@ portSAVE_CONTEXT .macro ;Save the remaining registers. pushm_x #12, r15 - mov.w &usCriticalNesting, r14 + movx.w &usCriticalNesting, r14 push_x r14 mov_x &pxCurrentTCB, r12 mov_x sp, 0( r12 ) @@ -60,7 +60,7 @@ portRESTORE_CONTEXT .macro mov_x &pxCurrentTCB, r12 mov_x @r12, sp pop_x r15 - mov.w r15, &usCriticalNesting + movx.w r15, &usCriticalNesting popm_x #12, r15 nop pop.w sr diff --git a/portable/CodeWarrior/ColdFire_V1/portmacro.h b/portable/CodeWarrior/ColdFire_V1/portmacro.h index edae69ab2b2..8acfcd04fdd 100644 --- a/portable/CodeWarrior/ColdFire_V1/portmacro.h +++ b/portable/CodeWarrior/ColdFire_V1/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -107,8 +109,10 @@ extern void vPortClearInterruptMaskFromISR( UBaseType_t ); #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired != pdFALSE ) { portYIELD(); } } while( 0 ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/CodeWarrior/ColdFire_V2/portmacro.h b/portable/CodeWarrior/ColdFire_V2/portmacro.h index 665ae4b304c..053b0adecb6 100644 --- a/portable/CodeWarrior/ColdFire_V2/portmacro.h +++ b/portable/CodeWarrior/ColdFire_V2/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -106,8 +108,10 @@ extern void vPortClearInterruptMaskFromISR( UBaseType_t ); #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired != pdFALSE ) { portYIELD(); } } while( 0 ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM7_AT91FR40008/portmacro.h b/portable/GCC/ARM7_AT91FR40008/portmacro.h index 69ee70f078c..201f3c7a795 100644 --- a/portable/GCC/ARM7_AT91FR40008/portmacro.h +++ b/portable/GCC/ARM7_AT91FR40008/portmacro.h @@ -52,9 +52,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -250,8 +252,10 @@ extern void vPortExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM7_AT91SAM7S/portmacro.h b/portable/GCC/ARM7_AT91SAM7S/portmacro.h index 1a440a68c5f..b52fbe0aa45 100644 --- a/portable/GCC/ARM7_AT91SAM7S/portmacro.h +++ b/portable/GCC/ARM7_AT91SAM7S/portmacro.h @@ -52,9 +52,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -244,8 +246,10 @@ extern void vPortExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM7_LPC2000/portmacro.h b/portable/GCC/ARM7_LPC2000/portmacro.h index 50922065441..6545e1127c4 100644 --- a/portable/GCC/ARM7_LPC2000/portmacro.h +++ b/portable/GCC/ARM7_LPC2000/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -221,8 +223,10 @@ extern void vPortExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM7_LPC23xx/portmacro.h b/portable/GCC/ARM7_LPC23xx/portmacro.h index 768e86dbed1..f60cdde5331 100644 --- a/portable/GCC/ARM7_LPC23xx/portmacro.h +++ b/portable/GCC/ARM7_LPC23xx/portmacro.h @@ -52,9 +52,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -244,8 +246,10 @@ extern void vPortExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CA53_64_BIT/portmacro.h b/portable/GCC/ARM_CA53_64_BIT/portmacro.h index 1b2e277cf05..00fc061644b 100644 --- a/portable/GCC/ARM_CA53_64_BIT/portmacro.h +++ b/portable/GCC/ARM_CA53_64_BIT/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -165,11 +167,6 @@ void vPortTaskUsesFPU( void ); #define portNOP() __asm volatile( "NOP" ) #define portINLINE __inline -#ifdef __cplusplus - } /* extern C */ -#endif - - /* The number of bits to shift for an interrupt priority is dependent on the number of bits implemented by the interrupt controller. 
*/ #if configUNIQUE_INTERRUPT_PRIORITIES == 16 @@ -208,4 +205,10 @@ number of bits implemented by the interrupt controller. */ #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h b/portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h index d8d911f4f75..0199288824c 100644 --- a/portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h +++ b/portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -165,11 +167,6 @@ void vPortTaskUsesFPU( void ); #define portNOP() __asm volatile( "NOP" ) #define portINLINE __inline -#ifdef __cplusplus - } /* extern C */ -#endif - - /* The number of bits to shift for an interrupt priority is dependent on the number of bits implemented by the interrupt controller. */ #if configUNIQUE_INTERRUPT_PRIORITIES == 16 @@ -193,4 +190,10 @@ number of bits implemented by the interrupt controller. */ #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CA9/portmacro.h b/portable/GCC/ARM_CA9/portmacro.h index d97fb535f71..6f1f2272456 100644 --- a/portable/GCC/ARM_CA9/portmacro.h +++ b/portable/GCC/ARM_CA9/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -162,11 +164,6 @@ by default. */ #define portNOP() __asm volatile( "NOP" ) #define portINLINE __inline -#ifdef __cplusplus - } /* extern C */ -#endif - - /* The number of bits to shift for an interrupt priority is dependent on the number of bits implemented by the interrupt controller. */ #if configUNIQUE_INTERRUPT_PRIORITIES == 16 @@ -205,4 +202,10 @@ number of bits implemented by the interrupt controller. */ #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index b9e9ef6623f..dc7f54578e2 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -156,7 +156,6 @@ portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) /*-----------------------------------------------------------*/ - /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/portable/GCC/ARM_CM23/non_secure/port.c b/portable/GCC/ARM_CM23/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM23/non_secure/port.c +++ b/portable/GCC/ARM_CM23/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
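To make the probe above concrete: writing 0xFF into a system handler priority field and reading it back reveals which bits are wired; on a device with three priority bits the read-back is 0xE0, and counting the leading ones (as the loop that follows does) gives 3 implemented bits and a maximum PRIGROUP of 7 - 3 = 4. A host-side rehearsal of that arithmetic, where the 0xE0 read-back is an assumed example value:

#include <stdint.h>
#include <stdio.h>

int main( void )
{
    uint8_t ucMaxPriorityValue = 0xE0;     /* Assumed read-back on a 3-bit device. */
    uint32_t ulImplementedPrioBits = 0;

    /* Count the leading ones, exactly as the port loop does. */
    while( ( ucMaxPriorityValue & 0x80U ) == 0x80U )
    {
        ulImplementedPrioBits++;
        ucMaxPriorityValue <<= 1;
    }

    /* Prints: implemented bits = 3, max PRIGROUP = 4. */
    printf( "implemented bits = %u, max PRIGROUP = %u\n",
            ( unsigned ) ulImplementedPrioBits,
            ( unsigned ) ( 7U - ulImplementedPrioBits ) );

    return 0;
}
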
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index c6dad99857c..746f734b8ac 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -48,12 +48,17 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. 
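The two assertions above are commonly satisfied by configuring the NVIC before starting the scheduler: a priority grouping of zero (all bits are pre-emption priority) and every FreeRTOS-API-using interrupt at a priority numerically at or above configMAX_SYSCALL_INTERRUPT_PRIORITY. A hedged sketch using the CMSIS core API; EXAMPLE_IRQn stands in for a real device interrupt, and it assumes configMAX_SYSCALL_INTERRUPT_PRIORITY holds the usual pre-shifted 8-bit value and that configPRIO_BITS is defined.

#include "FreeRTOS.h"
#include "task.h"
/* The CMSIS device header that provides NVIC_SetPriorityGrouping(),
 * NVIC_SetPriority() and NVIC_EnableIRQ() is device specific and omitted. */

void vExampleStartApplication( void )
{
    /* No sub-priority bits, so the PRIGROUP assertion above holds. */
    NVIC_SetPriorityGrouping( 0 );

    /* Unshift the byte value for CMSIS, which expects a priority in the
     * range 0 .. ( 2^configPRIO_BITS ) - 1.  The result is at, not above,
     * the maximum syscall priority. */
    NVIC_SetPriority( EXAMPLE_IRQn,
                      configMAX_SYSCALL_INTERRUPT_PRIORITY >> ( 8U - configPRIO_BITS ) );
    NVIC_EnableIRQ( EXAMPLE_IRQn );

    vTaskStartScheduler();
}
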
*/ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -61,12 +66,14 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. 
*/ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. 
ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index c6dad99857c..746f734b8ac 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -48,12 +48,17 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -61,12 +66,14 @@ /** * @brief Critical section management. 
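Cortex-M23 is an ARMv8-M Baseline core without a BASEPRI register, which is why portmacro.h above sets portHAS_BASEPRI to 0: the interrupt-priority validation added to port.c compiles out, portASSERT_IF_INTERRUPT_PRIORITY_INVALID() is left undefined by the common header, and the kernel falls back to the empty default supplied by FreeRTOS.h, roughly as sketched below. Critical sections on this core instead disable interrupts globally with cpsid i, as the macros that follow show.

#ifndef portASSERT_IF_INTERRUPT_PRIORITY_INVALID
    /* Sketch of the stock kernel default: defined away when the port does
     * not supply a validation hook. */
    #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
#endif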
*/ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
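For orientation, a worked example of the region bookkeeping above, assuming the default of 8 MPU regions (illustrative arithmetic only):

/* With configTOTAL_MPU_REGIONS == 8:
 *   portLAST_CONFIGURABLE_REGION  = 8 - 1 = 7
 *   portNUM_CONFIGURABLE_REGIONS  = ( 7 - 5 ) + 1 = 3
 *   portTOTAL_NUM_REGIONS         = 3 + 1 = 4   (three configurable regions plus the stack region)
 */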
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM3/portmacro.h b/portable/GCC/ARM_CM3/portmacro.h index dd729f13f2c..36e38f75d21 100644 --- a/portable/GCC/ARM_CM3/portmacro.h +++ b/portable/GCC/ARM_CM3/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -243,8 +245,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM33/non_secure/port.c b/portable/GCC/ARM_CM33/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM33/non_secure/port.c +++ b/portable/GCC/ARM_CM33/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
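As a concrete illustration of the read-back logic, suppose the device implements 3 priority bits: the 0xFF written above reads back as 0xE0, the loop that follows counts three leading bits, and the largest PRIGROUP value that keeps every implemented bit a preemption-priority bit is 7 - 3 = 4. A minimal self-contained sketch of that counting step (hypothetical helper, using the same top-bit test as the port):

#include <stdint.h>

/* Count how many of the top bits "stuck" after writing 0xFF to a priority
 * field; for example a read-back of 0xE0 yields 3 implemented priority bits. */
static uint32_t prvCountImplementedPriorityBits( uint8_t ucReadBack )
{
    uint32_t ulBits = 0;

    while( ( ucReadBack & 0x80U ) == 0x80U )
    {
        ulBits++;
        ucReadBack = ( uint8_t ) ( ucReadBack << 1 );
    }

    return ulBits; /* 3 for 0xE0, so the maximum PRIGROUP value is 7 - 3 = 4. */
}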
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 4fe8c59147a..19da9b0ecfe 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
*/ @@ -60,8 +65,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. 
*/ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. 
*/ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. 
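The scheduler utilities defined above are normally exercised from an interrupt handler through the deferred-yield pattern; a short usage sketch follows (the handler name and semaphore handle are assumptions made for illustration):

#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xDataReadySemaphore;

void UART0_IRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Unblock the task that is waiting for the data. */
    xSemaphoreGiveFromISR( xDataReadySemaphore, &xHigherPriorityTaskWoken );

    /* Pends a PendSV (context switch on exit) only if a higher priority
     * task was woken by the give above. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}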
*/ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. 
*/ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. 
*/ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. 
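+ *
+ * The register probed above is SHPR2, whose most significant byte holds
+ * the SVCall priority, so restoring it avoids leaving SVCall at the
+ * lowest possible priority once the scheduler is running.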
*/ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
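+ *
+ * Numerically larger PRIGROUP values assign more bits to sub-priority, so
+ * the assertion below uses the limit computed in xPortStartScheduler to
+ * confirm that no implemented priority bit has been configured as a
+ * sub-priority bit (beyond what the hardware itself forces).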
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 4fe8c59147a..19da9b0ecfe 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -60,8 +65,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
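 *
 * For example, a task that calls secure functions would typically invoke
 * portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ) as its
 * first action; configMINIMAL_SECURE_STACK_SIZE is named here only for
 * illustration and would come from the application's FreeRTOSConfig.h.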
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM35P/non_secure/port.c +++ b/portable/GCC/ARM_CM35P/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. 
*/ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
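+ *
+ * For instance, priorities 0x05 ( 0b101 ) and 0x04 ( 0b100 ) then share
+ * preemption priority 0b10 and differ only in the sub-priority bit.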
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. 
ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacro.h b/portable/GCC/ARM_CM35P/non_secure/portmacro.h index 33bfb283461..cc643459770 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
*/ @@ -60,8 +65,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
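 * (On these cores a 32-bit aligned load is a single instruction, so a read
 * of the tick count cannot be torn by an interrupt.)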
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. 
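 * For example, a device whose MPU implements sixteen regions could set
 * #define configTOTAL_MPU_REGIONS ( 16UL ) in its FreeRTOSConfig.h.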
*/ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. 
*/ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. 
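 *
 * An application can supply its own implementation by defining
 * portSUPPRESS_TICKS_AND_SLEEP() in FreeRTOSConfig.h; otherwise the
 * vPortSuppressTicksAndSleep() provided by port.c (compiled when
 * configUSE_TICKLESS_IDLE is 1) is used, as the #ifndef below shows.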
*/ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. 
*/ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. 
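+ *
+ * As an illustration, with three implemented priority bits (an assumed
+ * count) the loop below leaves ulImplementedPrioBits at 3, so
+ * ulMaxPRIGROUPValue becomes 7 - 3 = 4, which is 0x400 once shifted into
+ * the AIRCR PRIGROUP position further down.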
*/ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. 
*/ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
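+ * As an application-side sketch only (xDeviceIRQn is a placeholder, and
+ * configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY is assumed to be the
+ * unshifted priority value conventionally defined in FreeRTOSConfig.h):
+ *     NVIC_SetPriorityGrouping( 0 );
+ *     NVIC_SetPriority( xDeviceIRQn, configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY );
+ * called before the scheduler is started keeps both assertions in this
+ * function satisfied for that interrupt.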
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h index 33bfb283461..cc643459770 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -60,8 +65,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
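+ * With the default configTOTAL_MPU_REGIONS of 8 this works out as
+ * ( ( 7 - 5 ) + 1 ) + 1 = 4 regions per task.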
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
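 * Usage sketch (the stack size value is assumed for illustration only): a
 * task that calls secure-side functions would typically invoke
 *     portALLOCATE_SECURE_CONTEXT( 256 );
 * as its first statement, before making any call into secure firmware.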
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index 25058026973..b15193f1ee5 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -28,11 +28,13 @@ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -301,12 +303,15 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) #ifndef configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY - #warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. https://www.FreeRTOS.org/FreeRTOS-V10.3.x.html" + #warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. 
*www.FreeRTOS.org/FreeRTOS-V10.3.x.html" #define configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY 0 #endif /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM4F/portmacro.h b/portable/GCC/ARM_CM4F/portmacro.h index a3b2b46c989..443661a6d3a 100644 --- a/portable/GCC/ARM_CM4F/portmacro.h +++ b/portable/GCC/ARM_CM4F/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -244,8 +246,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM55/non_secure/port.c +++ b/portable/GCC/ARM_CM55/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. 
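+ * ("Above" here means logically more urgent, i.e. a numerically smaller
+ * priority value than configMAX_SYSCALL_INTERRUPT_PRIORITY.)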
+ */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. 
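+ * In binary terms: 5 is 0000 0101b, so its least significant (sub-priority)
+ * bit is set, whereas 4 is 0000 0100b with the sub-priority bit clear -
+ * which is what the assert below enforces.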
+ * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
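+ * For example (values assumed for illustration): with 3 implemented
+ * priority bits and configMAX_SYSCALL_INTERRUPT_PRIORITY set to 0xA0
+ * (logical priority 5), only interrupts whose priority bytes read back as
+ * 0xA0 or higher (logical priorities 5, 6 and 7) may call FromISR
+ * functions.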
+ * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index adb47d8420f..c9bad40cf98 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -65,8 +70,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
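+ * (the region base address ORed with the shareability, access permission
+ * and execute-never attribute bits defined above)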
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
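 * Usage sketch (the task name is invented for illustration):
 *     portTASK_FUNCTION_PROTO( vAnExampleTask, pvParameters );
 *     portTASK_FUNCTION( vAnExampleTask, pvParameters ) { for( ; ; ) { } }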
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
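+ * SHPR2 (0xE000ED1C) provides a writable priority byte that is used below
+ * to probe how many priority bits the device implements, and AIRCR
+ * (0xE000ED0C) holds the PRIGROUP field that is checked against
+ * ulMaxPRIGROUPValue.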
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
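+ * For example (3 implemented priority bits assumed): the loop below runs
+ * three times, so ulMaxPRIGROUPValue becomes 7 - 3 = 4, and PRIGROUP
+ * settings of 4 or less keep every implemented bit as pre-emption
+ * priority.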
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
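+ * Exception numbers 0 to 15 are the fixed system exceptions, so an IPSR
+ * value of 16 or more identifies an external interrupt whose priority
+ * byte can be read from the NVIC priority registers indexed below.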
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index adb47d8420f..c9bad40cf98 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
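The two assertions above pass when application interrupts are configured along the following lines. This is only a sketch using standard CMSIS-Core calls; EXTI0_IRQn and configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY are placeholder names for a device-specific interrupt and its unshifted priority, neither of which is defined by this patch.

/* Illustrative application-side interrupt setup, assuming CMSIS headers. */
void vSetupExampleInterrupt( void )
{
    /* All priority bits are pre-emption priority bits, which satisfies the
     * PRIGROUP assertion in vPortValidateInterruptPriority(). */
    NVIC_SetPriorityGrouping( 0 );

    /* Numerically greater than or equal to the maximum syscall priority,
     * i.e. a logically lower priority that BASEPRI can mask. */
    NVIC_SetPriority( EXTI0_IRQn, configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY );
    NVIC_EnableIRQ( EXTI0_IRQn );
}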
*/ @@ -65,8 +70,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
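Since portTICK_PERIOD_MS appears among the architecture specifics above, a short usage sketch may help; pdMS_TO_TICKS() is the usual kernel helper built on the same tick-rate constant, and the 500 ms figure is purely illustrative.

#include "FreeRTOS.h"
#include "task.h"

void vExampleDelay( void )
{
    /* pdMS_TO_TICKS() converts milliseconds to tick periods using
     * configTICK_RATE_HZ; the older idiom is 500 / portTICK_PERIOD_MS. */
    vTaskDelay( pdMS_TO_TICKS( 500 ) );
}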
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
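The RBAR attribute macros above are combined with a region's base address when the port programs the MPU. The fragment below is only a rough illustration of that composition; the base address is a placeholder and the field layout is stated as an assumption rather than something this hunk defines.

/* Hypothetical read/write, never-execute, non-shareable region descriptor;
 * MPU_RBAR keeps the base in bits [31:5] with SH/AP/XN in the low bits. */
uint32_t ulRegionBaseAddress = 0x20000000UL;                    /* 32-byte aligned placeholder. */
uint32_t ulRBARValue = ( ulRegionBaseAddress & 0xFFFFFFE0UL ) |
                       portMPU_REGION_NON_SHAREABLE |
                       portMPU_REGION_READ_WRITE |
                       portMPU_REGION_EXECUTE_NEVER;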
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
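The portEND_SWITCHING_ISR()/portYIELD_FROM_ISR() macros defined above are intended to be used at the end of an interrupt handler, roughly as sketched here; the handler name, queue handle and payload value are hypothetical.

#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xExampleQueue;    /* Hypothetical queue created elsewhere. */

void vExampleIRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint32_t ulValue = 0x01;

    /* The FromISR call reports whether it unblocked a task of higher
     * priority than the task that was interrupted. */
    ( void ) xQueueSendFromISR( xExampleQueue, &ulValue, &xHigherPriorityTaskWoken );

    /* Pends PendSV only if a context switch is actually required. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}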
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM7/r0p1/portmacro.h b/portable/GCC/ARM_CM7/r0p1/portmacro.h index 897fef5f468..82529f998ad 100644 --- a/portable/GCC/ARM_CM7/r0p1/portmacro.h +++ b/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -245,8 +247,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM85/non_secure/port.c +++ b/portable/GCC/ARM_CM85/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index fec6923394c..c45dd21c29e 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
*/ @@ -65,8 +70,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. 
*/ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. 
*/ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. 
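A brief usage note on the critical section mappings above: application code normally reaches them through taskENTER_CRITICAL()/taskEXIT_CRITICAL(), as in the sketch below, where ulSharedCounter stands in for any state shared with interrupts that use the FreeRTOS API.

#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedCounter = 0;

void vExampleUpdateSharedState( void )
{
    /* Expands to vPortEnterCritical()/vPortExitCritical() on this port, which
     * mask interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY via BASEPRI. */
    taskENTER_CRITICAL();
    {
        ulSharedCounter++;
    }
    taskEXIT_CRITICAL();
}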
*/ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. 
*/ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. 
*/ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. 
*/ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index fec6923394c..c45dd21c29e 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ @@ -65,8 +70,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
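With vPortValidateInterruptPriority() wired into the CM85 port, any interrupt that calls a ...FromISR API must be assigned a priority numerically at or above configMAX_SYSCALL_INTERRUPT_PRIORITY before it is enabled, and all priority bits must be configured as pre-emption bits. A CMSIS-style sketch of doing that for a hypothetical UART1_IRQn (the IRQ name and the helper function are illustrative, not part of the patch):

/* Illustrative only - assumes a CMSIS device header providing UART1_IRQn,
 * NVIC_SetPriority(), NVIC_SetPriorityGrouping(), NVIC_EnableIRQ() and
 * __NVIC_PRIO_BITS. */
void vSetupUartInterruptPriority( void )
{
    /* Use every priority bit as a pre-emption bit so the PRIGROUP
     * assertion above passes. */
    NVIC_SetPriorityGrouping( 0 );

    /* configMAX_SYSCALL_INTERRUPT_PRIORITY is expressed as a full 8-bit
     * value; CMSIS expects the value in the implemented bits only, so
     * shift the unimplemented low-order bits away. */
    NVIC_SetPriority( UART1_IRQn,
                      configMAX_SYSCALL_INTERRUPT_PRIORITY >> ( 8U - __NVIC_PRIO_BITS ) );

    NVIC_EnableIRQ( UART1_IRQn );
}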
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
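portTICK_PERIOD_MS, defined in the architecture-specific block above, converts between milliseconds and ticks. A small usage sketch under the assumption of an otherwise ordinary application task (the task function is illustrative only):

#include "FreeRTOS.h"
#include "task.h"

/* Illustrative task body: block for roughly 10 ms irrespective of the
 * configTICK_RATE_HZ chosen in FreeRTOSConfig.h.  pdMS_TO_TICKS( 10 )
 * is the equivalent helper from projdefs.h. */
static void prvExampleTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        vTaskDelay( ( TickType_t ) 10 / portTICK_PERIOD_MS );
    }
}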
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
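The region macros above reserve regions 0-3 for the kernel's fixed mappings and region 4 for each task's stack, leaving the remainder for application use. A worked example of the arithmetic, assuming configTOTAL_MPU_REGIONS is left at its default of eight (values shown are derived, not new definitions):

/* With configTOTAL_MPU_REGIONS = 8:
 *
 *   portLAST_CONFIGURABLE_REGION = 8UL - 1UL            = 7
 *   portNUM_CONFIGURABLE_REGIONS = ( 7 - 5 ) + 1        = 3
 *   portTOTAL_NUM_REGIONS        = 3 + 1 (stack region)  = 4
 *
 * so each task's MPU settings describe regions 5, 6 and 7 plus its
 * stack region (region 4). */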
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
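Each MAIR attribute above is an 8-bit value; the port packs several of them into MAIR0 and each region then selects one by index through its RLAR. A sketch of that packing under the standard ARMv8-M layout of 8 bits per attribute (the variable name is illustrative only):

/* Attribute index 0: normal, write-back cacheable memory.
 * Attribute index 1: device memory (nGnRE).
 * A region's RLAR attribute-index field later refers to one of these. */
uint32_t ulMair0Value = ( ( uint32_t ) portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << 0U ) |
                        ( ( uint32_t ) portMPU_DEVICE_MEMORY_nGnRE << 8U );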
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
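portASSERT_IF_INTERRUPT_PRIORITY_INVALID() above maps to vPortValidateInterruptPriority() only when configASSERT is defined and portHAS_BASEPRI is 1; otherwise FreeRTOS falls back to its empty default. The check then runs inside every ...FromISR API call, as in this illustrative handler (the vector name and semaphore are hypothetical, not part of the patch):

#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xUartRxSemaphore;   /* Created elsewhere at start-up. */

void UART1_IRQHandler( void )                /* Hypothetical vector name. */
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* xSemaphoreGiveFromISR() invokes
     * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when configASSERT is
     * defined, trapping interrupts left above
     * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
    xSemaphoreGiveFromISR( xUartRxSemaphore, &xHigherPriorityTaskWoken );

    /* portYIELD_FROM_ISR() pends PendSV through portNVIC_INT_CTRL_REG if a
     * higher priority task was unblocked. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}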
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CRx_No_GIC/portmacro.h b/portable/GCC/ARM_CRx_No_GIC/portmacro.h index be5c6121e40..e8e54103439 100644 --- a/portable/GCC/ARM_CRx_No_GIC/portmacro.h +++ b/portable/GCC/ARM_CRx_No_GIC/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -173,9 +175,10 @@ void vPortTaskUsesFPU( void ); #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) +/* *INDENT-OFF* */ #ifdef __cplusplus - } /* extern C */ + } #endif - +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ATMega323/portmacro.h b/portable/GCC/ATMega323/portmacro.h index c22706de5d4..a9f317e6d65 100644 --- a/portable/GCC/ATMega323/portmacro.h +++ b/portable/GCC/ATMega323/portmacro.h @@ -36,9 +36,11 @@ Changes from V1.2.3 #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -104,8 +106,10 @@ extern void vPortYield( void ) __attribute__ ( ( naked ) ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/AVR32_UC3/portmacro.h b/portable/GCC/AVR32_UC3/portmacro.h index 86014a135b5..5bc7c8b0877 100644 --- a/portable/GCC/AVR32_UC3/portmacro.h +++ b/portable/GCC/AVR32_UC3/portmacro.h @@ -85,9 +85,11 @@ #include "intc.h" #include "compiler.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. */ @@ -691,8 +693,10 @@ extern void *pvPortRealloc( void *pv, size_t xSize ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/CORTUS_APS3/portmacro.h b/portable/GCC/CORTUS_APS3/portmacro.h index 86000665681..37e739bb55f 100644 --- a/portable/GCC/CORTUS_APS3/portmacro.h +++ b/portable/GCC/CORTUS_APS3/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #include @@ -148,8 +150,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) /*---------------------------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ColdFire_V2/portmacro.h b/portable/GCC/ColdFire_V2/portmacro.h index 9b4c1da8833..14d318b7b94 100644 --- a/portable/GCC/ColdFire_V2/portmacro.h +++ b/portable/GCC/ColdFire_V2/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -106,8 +108,10 @@ extern void vPortClearInterruptMaskFromISR( UBaseType_t ); #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired != pdFALSE ) { portYIELD(); } } while( 0 ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/H8S2329/portmacro.h b/portable/GCC/H8S2329/portmacro.h index 4c87abd163c..829ff9f9b64 100644 --- a/portable/GCC/H8S2329/portmacro.h +++ b/portable/GCC/H8S2329/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -133,8 +135,10 @@ extern void* pxCurrentTCB; \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/HCS12/portmacro.h b/portable/GCC/HCS12/portmacro.h index 9202510ccc8..a864419899f 100644 --- a/portable/GCC/HCS12/portmacro.h +++ b/portable/GCC/HCS12/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -241,8 +243,10 @@ typedef unsigned char UBaseType_t; #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/IA32_flat/portmacro.h b/portable/GCC/IA32_flat/portmacro.h index 060eb4daa69..3a0cd287c8e 100644 --- a/portable/GCC/IA32_flat/portmacro.h +++ b/portable/GCC/IA32_flat/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -284,8 +286,10 @@ above the max system call interrupt priority. */ #define portAPIC_PROCESSOR_PRIORITY ( *( ( volatile uint32_t * ) ( configAPIC_BASE + 0xA0UL ) ) ) #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( ( portAPIC_PROCESSOR_PRIORITY ) <= ( portMAX_API_CALL_PRIORITY ) ) +/* *INDENT-OFF* */ #ifdef __cplusplus - } /* extern C */ + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/MSP430F449/portmacro.h b/portable/GCC/MSP430F449/portmacro.h index 8d6827a57e9..5a6e1db9c94 100644 --- a/portable/GCC/MSP430F449/portmacro.h +++ b/portable/GCC/MSP430F449/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -122,8 +124,10 @@ extern void vPortYield( void ) __attribute__ ( ( naked ) ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/MicroBlaze/portmacro.h b/portable/GCC/MicroBlaze/portmacro.h index c646496ab3f..b04c526a42d 100644 --- a/portable/GCC/MicroBlaze/portmacro.h +++ b/portable/GCC/MicroBlaze/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -122,8 +124,10 @@ void vTaskSwitchContext(); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/MicroBlazeV8/portmacro.h b/portable/GCC/MicroBlazeV8/portmacro.h index be9dfacc42f..3ade98795df 100644 --- a/portable/GCC/MicroBlazeV8/portmacro.h +++ b/portable/GCC/MicroBlazeV8/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* BSP includes. */ #include @@ -365,8 +367,10 @@ void vPortExceptionsInstallHandlers( void ); void vApplicationExceptionRegisterDump( xPortRegisterDump *xRegisterDump ); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/MicroBlazeV9/portmacro.h b/portable/GCC/MicroBlazeV9/portmacro.h index c26daa79fc2..9d10b5e5beb 100644 --- a/portable/GCC/MicroBlazeV9/portmacro.h +++ b/portable/GCC/MicroBlazeV9/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* BSP includes. */ #include @@ -370,8 +372,10 @@ void vPortExceptionsInstallHandlers( void ); void vApplicationExceptionRegisterDump( xPortRegisterDump *xRegisterDump ); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/NiosII/portmacro.h b/portable/GCC/NiosII/portmacro.h index 7a159925db2..3ac552dd8fa 100644 --- a/portable/GCC/NiosII/portmacro.h +++ b/portable/GCC/NiosII/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #include "sys/alt_irq.h" @@ -104,8 +106,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/PPC405_Xilinx/portmacro.h b/portable/GCC/PPC405_Xilinx/portmacro.h index d7f32944c40..e6f1c06c845 100644 --- a/portable/GCC/PPC405_Xilinx/portmacro.h +++ b/portable/GCC/PPC405_Xilinx/portmacro.h @@ -31,9 +31,11 @@ #include "xexception_l.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -113,8 +115,10 @@ void vPortYield( void ); void vPortSetupInterruptController( void ); BaseType_t xPortInstallInterruptHandler( uint8_t ucInterruptID, XInterruptHandler pxHandler, void *pvCallBackRef ); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/PPC440_Xilinx/portmacro.h b/portable/GCC/PPC440_Xilinx/portmacro.h index d7f32944c40..e6f1c06c845 100644 --- a/portable/GCC/PPC440_Xilinx/portmacro.h +++ b/portable/GCC/PPC440_Xilinx/portmacro.h @@ -31,9 +31,11 @@ #include "xexception_l.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -113,8 +115,10 @@ void vPortYield( void ); void vPortSetupInterruptController( void ); BaseType_t xPortInstallInterruptHandler( uint8_t ucInterruptID, XInterruptHandler pxHandler, void *pvCallBackRef ); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RISC-V/portmacro.h b/portable/GCC/RISC-V/portmacro.h index 1e72b1af547..727273aca5c 100644 --- a/portable/GCC/RISC-V/portmacro.h +++ b/portable/GCC/RISC-V/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -184,8 +186,10 @@ extern size_t xCriticalNesting; #error configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS must be defined in FreeRTOSConfig.h. Set them to zero if there is no MTIME (machine time) clock. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX100/portmacro.h b/portable/GCC/RX100/portmacro.h index 863596685aa..4556e147a5e 100644 --- a/portable/GCC/RX100/portmacro.h +++ b/portable/GCC/RX100/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -144,8 +146,10 @@ void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX200/portmacro.h b/portable/GCC/RX200/portmacro.h index 80fc8c8041d..40682825dd1 100644 --- a/portable/GCC/RX200/portmacro.h +++ b/portable/GCC/RX200/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -139,8 +141,10 @@ void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX600/portmacro.h b/portable/GCC/RX600/portmacro.h index 7e1fd1e0c92..7e7bbe88b3c 100644 --- a/portable/GCC/RX600/portmacro.h +++ b/portable/GCC/RX600/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -139,8 +141,10 @@ void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX600v2/portmacro.h b/portable/GCC/RX600v2/portmacro.h index 7e1fd1e0c92..7e7bbe88b3c 100644 --- a/portable/GCC/RX600v2/portmacro.h +++ b/portable/GCC/RX600v2/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -139,8 +141,10 @@ void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX700v3_DPFPU/portmacro.h b/portable/GCC/RX700v3_DPFPU/portmacro.h index 6b76374c882..881610515ee 100644 --- a/portable/GCC/RX700v3_DPFPU/portmacro.h +++ b/portable/GCC/RX700v3_DPFPU/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -182,8 +184,10 @@ /* Definition to allow compatibility with existing FreeRTOS Demo using flop.c. */ #define portTASK_USES_FLOATING_POINT() vPortTaskUsesDPFPU() - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/STR75x/portmacro.h b/portable/GCC/STR75x/portmacro.h index 85262dac4d5..8d5e832c59e 100644 --- a/portable/GCC/STR75x/portmacro.h +++ b/portable/GCC/STR75x/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -135,8 +137,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/TriCore_1782/portmacro.h b/portable/GCC/TriCore_1782/portmacro.h index 734abc5949c..d17c7bc6882 100644 --- a/portable/GCC/TriCore_1782/portmacro.h +++ b/portable/GCC/TriCore_1782/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* System Includes. 
*/ #include @@ -169,8 +171,10 @@ extern uint32_t uxPortSetInterruptMaskFromISR( void ); void vPortReclaimCSA( uint32_t *pxTCB ); #define portCLEAN_UP_TCB( pxTCB ) vPortReclaimCSA( ( uint32_t * ) ( pxTCB ) ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/78K0R/portmacro.h b/portable/IAR/78K0R/portmacro.h index e6f67cdd3b7..4ccc083c080 100644 --- a/portable/IAR/78K0R/portmacro.h +++ b/portable/IAR/78K0R/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -140,8 +142,10 @@ static __interrupt void P0_isr (void); #define OCD_ENABLED 0x81 #define OCD_ENABLED_ERASE 0x80 +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CA5_No_GIC/portmacro.h b/portable/IAR/ARM_CA5_No_GIC/portmacro.h index 3d73f83fd16..b1dcf3878fb 100644 --- a/portable/IAR/ARM_CA5_No_GIC/portmacro.h +++ b/portable/IAR/ARM_CA5_No_GIC/portmacro.h @@ -29,14 +29,18 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + /* IAR includes. */ #ifdef __ICCARM__ #include - #ifdef __cplusplus - extern "C" { - #endif + /*----------------------------------------------------------- * Port specific definitions. @@ -146,11 +150,6 @@ #define portNOP() __asm volatile( "NOP" ) - - #ifdef __cplusplus - } /* extern C */ - #endif - /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in the source code because to do so would cause other compilers to generate warnings. */ @@ -159,4 +158,10 @@ #endif /* __ICCARM__ */ +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CA9/portmacro.h b/portable/IAR/ARM_CA9/portmacro.h index 87c83eaa05c..bce3013b5b4 100644 --- a/portable/IAR/ARM_CA9/portmacro.h +++ b/portable/IAR/ARM_CA9/portmacro.h @@ -29,15 +29,17 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + /* IAR includes. */ #ifdef __ICCARM__ #include - #ifdef __cplusplus - extern "C" { - #endif - /*----------------------------------------------------------- * Port specific definitions. * @@ -156,11 +158,6 @@ #define portNOP() __asm volatile( "NOP" ) - - #ifdef __cplusplus - } /* extern C */ - #endif - /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in the source code because to do so would cause other compilers to generate warnings. */ @@ -206,4 +203,10 @@ number of bits implemented by the interrupt controller. 
*/ #define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) ) #define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) ) +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM23/non_secure/port.c b/portable/IAR/ARM_CM23/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM23/non_secure/port.c +++ b/portable/IAR/ARM_CM23/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. 
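The portNVIC_IP_REGISTERS_OFFSET_16 constant introduced above lets vPortValidateInterruptPriority() index the NVIC priority bytes directly with the raw exception number read from IPSR, without subtracting the 16 system exceptions. Spelled out (addresses are the standard Cortex-M NVIC map, not new definitions):

/* pcInterruptPriorityRegisters = 0xE000E3F0 = 0xE000E400 - 16.
 * NVIC_IPR0 lives at 0xE000E400 and IPSR numbers external interrupts
 * from 16 upwards, so for any exception number n >= 16:
 *
 *     pcInterruptPriorityRegisters[ n ] == NVIC_IPR byte for IRQ ( n - 16 )
 */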
*/ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. 
*/ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacro.h b/portable/IAR/ARM_CM23/non_secure/portmacro.h index d845ac1caa0..9cf0e87fbc8 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -48,11 +48,16 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __root +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -60,8 +65,8 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. 
Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
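As a quick check of the region macros above, with the default configTOTAL_MPU_REGIONS of 8 the values work out as follows (arithmetic only, shown as C comments):

    /* configTOTAL_MPU_REGIONS          = 8 (default)                        */
    /* portLAST_CONFIGURABLE_REGION     = 8 - 1          = 7                 */
    /* portNUM_CONFIGURABLE_REGIONS     = ( 7 - 5 ) + 1  = 3                 */
    /* portTOTAL_NUM_REGIONS            = 3 + 1          = 4                 */

which is where the four entries in the per-task xRegionsSettings[] array come from: three application-configurable regions plus the stack region.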
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. 
*/ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. 
ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h index d845ac1caa0..9cf0e87fbc8 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -48,11 +48,16 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __root +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -60,8 +65,8 @@ /** * @brief Critical section management. 
*/ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM3/portmacro.h b/portable/IAR/ARM_CM3/portmacro.h index c334978ea73..92763ac7809 100644 --- a/portable/IAR/ARM_CM3/portmacro.h +++ b/portable/IAR/ARM_CM3/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -203,8 +205,10 @@ #pragma diag_suppress=Pe191 #pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM33/non_secure/port.c b/portable/IAR/ARM_CM33/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM33/non_secure/port.c +++ b/portable/IAR/ARM_CM33/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
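To make the probe concrete before the bit-counting loop that follows in the diff: on a hypothetical device that implements three priority bits, the 0xFF written to the SHPR2 priority field reads back as 0xE0, and the rest of the calculation falls out as in this host-runnable sketch (illustration only, not port code):

    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        uint8_t  ucMaxPriorityValue    = 0xE0; /* 0xFF written, 0xE0 read back: only the top three bits stuck. */
        uint32_t ulImplementedPrioBits = 0;

        /* Count the leading one bits that survived the write, exactly as the
         * loop below does with portTOP_BIT_OF_BYTE. */
        while( ( ucMaxPriorityValue & 0x80 ) == 0x80 )
        {
            ulImplementedPrioBits++;
            ucMaxPriorityValue = ( uint8_t ) ( ucMaxPriorityValue << 1 );
        }

        assert( ulImplementedPrioBits == 3 );
        assert( ( 7 - ulImplementedPrioBits ) == 4 ); /* ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - 3,
                                                       * later shifted into AIRCR's PRIGROUP field. */
        return 0;
    }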
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacro.h b/portable/IAR/ARM_CM33/non_secure/portmacro.h index b6df20eb88f..380768fc03b 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,6 +49,7 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ @@ -57,6 +58,10 @@ #endif /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
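The configASSERT() calls in vPortValidateInterruptPriority() above are what an application hits when an interrupt that calls a ...FromISR() API is left at its default priority of zero, or when priority grouping allocates bits to sub-priority. A minimal sketch of a compliant setup, assuming a CMSIS device header (TIMER0_IRQn is a hypothetical IRQ number), that configPRIO_BITS is defined, and the usual convention that configMAX_SYSCALL_INTERRUPT_PRIORITY is expressed in register format, as the masking in xPortStartScheduler() above assumes:

    void vSetupTimerInterruptPriority( void )
    {
        /* Keep every priority bit a preemption priority bit so that the
         * PRIGROUP assert against portAIRCR_REG holds. */
        NVIC_SetPriorityGrouping( 0 );

        /* CMSIS expects the un-shifted priority value.  Numerically higher
         * values are logically lower priorities, so this leaves the interrupt
         * at or below configMAX_SYSCALL_INTERRUPT_PRIORITY and it may safely
         * call ...FromISR() APIs. */
        NVIC_SetPriority( TIMER0_IRQn, configMAX_SYSCALL_INTERRUPT_PRIORITY >> ( 8U - configPRIO_BITS ) );
        NVIC_EnableIRQ( TIMER0_IRQn );
    }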
*/ @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. 
*/ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. 
*/ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. 
*/ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. 
*/ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. 
*/ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. 
*/ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h index b6df20eb88f..815dca0861a 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM35P/non_secure/port.c +++ b/portable/IAR/ARM_CM35P/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. 
*/ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. 
ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacro.h b/portable/IAR/ARM_CM35P/non_secure/portmacro.h index a0efc1f9dcf..46bc4e24b56 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM35P/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. 
*/ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. 
*/ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. 
*/ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. 
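
/* Because the portSUPPRESS_TICKS_AND_SLEEP() definition above is guarded by
 * #ifndef, an application can substitute its own low power implementation
 * from FreeRTOSConfig.h before this header is processed. A sketch only, using
 * the hook name from the FreeRTOS low power documentation: */
extern void vApplicationSleep( TickType_t xExpectedIdleTime );
#define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime )    vApplicationSleep( xExpectedIdleTime )
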
*/ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. 
*/ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. 
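
/* Worked example of the read-back logic above (illustrative only), assuming
 * hardware that implements 3 priority bits: writing 0xFF to the PendSV
 * priority field reads back as 0xE0, the shift loop counts three set bits,
 * and ulMaxPRIGROUPValue becomes 7 - 3 = 4 before being shifted into its
 * AIRCR position. The helper below simply restates that loop. */
#include <stdint.h>

static uint32_t prvCountImplementedPriorityBits( uint8_t ucReadBackValue )
{
    uint32_t ulImplementedBits = 0;

    while( ( ucReadBackValue & 0x80U ) == 0x80U )
    {
        ulImplementedBits++;
        ucReadBackValue = ( uint8_t ) ( ucReadBackValue << 1 );
    }

    return ulImplementedBits; /* 0xE0 -> 3, 0xF0 -> 4, 0xFF -> 8. */
}
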
*/ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
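
/* A sketch (not part of this patch) of how an application keeps the
 * assertions in vPortValidateInterruptPriority() satisfied, assuming a CMSIS
 * device header is available; ExampleIRQn and the unshifted priority value of
 * 5 are illustrative assumptions only. */
void vExampleConfigureInterrupt( void )
{
    /* All priority bits become preemption bits, so the PRIGROUP assertion
     * holds. */
    NVIC_SetPriorityGrouping( 0U );

    /* Use a priority numerically equal to or greater than the unshifted
     * configMAX_SYSCALL_INTERRUPT_PRIORITY value so this ISR may call
     * ...FromISR() APIs. */
    NVIC_SetPriority( ExampleIRQn, 5U );
    NVIC_EnableIRQ( ExampleIRQn );
}
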
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h index a0efc1f9dcf..46bc4e24b56 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -49,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif @@ -71,8 +76,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
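
/* Illustrative only: with configTICK_RATE_HZ set to 1000 the macro above
 * yields portTICK_PERIOD_MS == 1, so a 50 ms block time is 50 ticks. */
#include "FreeRTOS.h"
#include "task.h"

static void prvExampleDelay( void )
{
    const TickType_t xBlockTime = 50U / portTICK_PERIOD_MS; /* 50 ms at a 1 kHz tick. */

    vTaskDelay( xBlockTime );
}
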
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
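
/* A minimal sketch (not part of this patch) of how a task uses the
 * portALLOCATE_SECURE_CONTEXT() macro described here on a TrustZone enabled
 * build: allocate a secure context before the task's first call into secure
 * code. The secure function name and the use of
 * configMINIMAL_SECURE_STACK_SIZE (a demo configuration constant) are
 * assumptions for illustration. */
#include "FreeRTOS.h"
#include "task.h"

extern uint32_t ulExampleSecureFunction( uint32_t ulInput );

static void prvSecureCallingTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* Must be called once, before this task's first secure call. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    for( ; ; )
    {
        ( void ) ulExampleSecureFunction( 42UL );
        vTaskDelay( pdMS_TO_TICKS( 1000U ) );
    }
}
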
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM4F/portmacro.h b/portable/IAR/ARM_CM4F/portmacro.h index 148c81d14ee..02f1d6ff038 100644 --- a/portable/IAR/ARM_CM4F/portmacro.h +++ b/portable/IAR/ARM_CM4F/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -202,8 +204,10 @@ #pragma diag_suppress=Pe191 #pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM55/non_secure/port.c +++ b/portable/IAR/ARM_CM55/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55/non_secure/portmacro.h b/portable/IAR/ARM_CM55/non_secure/portmacro.h index a3b510e282c..7829ee6186a 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif @@ -76,8 +81,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. 
*/ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. 
*/ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. 
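
/* Minimal sketch (not part of this patch) of the critical section macros
 * above in use: taskENTER_CRITICAL() expands to portENTER_CRITICAL() in task
 * context, which on these BASEPRI equipped cores masks interrupts at or below
 * configMAX_SYSCALL_INTERRUPT_PRIORITY and supports nesting. The shared
 * counter is illustrative. */
#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedCounter = 0UL;

static void prvIncrementSharedCounter( void )
{
    taskENTER_CRITICAL();
    {
        ulSharedCounter++;
    }
    taskEXIT_CRITICAL();
}
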
*/ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. 
*/ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. 
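+     * For example, a device that implements only three priority bits keeps
+     * priorities in the three most significant bits of each 8-bit field, so
+     * the value read back here would be 0xE0.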
*/ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. 
*/ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
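+     *
+     * Note that configMAX_SYSCALL_INTERRUPT_PRIORITY must be specified as the
+     * raw eight bit value that is written to the priority registers, not the
+     * small index passed to NVIC_SetPriority() - for example, logical
+     * priority 5 on a device with three priority bits is ( 5 << 5 ), i.e.
+     * 0xA0.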
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h index a3b510e282c..7829ee6186a 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif @@ -76,8 +81,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM7/r0p1/portmacro.h b/portable/IAR/ARM_CM7/r0p1/portmacro.h index 08867c289c3..a2a2d878868 100644 --- a/portable/IAR/ARM_CM7/r0p1/portmacro.h +++ b/portable/IAR/ARM_CM7/r0p1/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -205,8 +207,10 @@ #pragma diag_suppress=Pe191 #pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM85/non_secure/port.c +++ b/portable/IAR/ARM_CM85/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
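+ *
+ * portNVIC_SHPR2_REG is the System Handler Priority Register 2 (0xE000ED1C),
+ * whose most significant byte holds the SVCall priority, and portAIRCR_REG is
+ * the Application Interrupt and Reset Control Register (0xE000ED0C), whose
+ * PRIGROUP field is checked by vPortValidateInterruptPriority().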
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
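+     * For example, if three bits stuck then the loop below counts
+     * ulImplementedPrioBits up to 3 and ulMaxPRIGROUPValue becomes 7 - 3 = 4,
+     * the largest PRIGROUP setting that still treats every implemented bit as
+     * a pre-emption priority bit.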
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
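+     * Exception numbers 0 to 15 are the architectural exceptions (Reset, NMI,
+     * HardFault, SVCall, PendSV, SysTick and so on), so only values of 16 and
+     * above index the NVIC interrupt priority registers read below.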
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85/non_secure/portmacro.h b/portable/IAR/ARM_CM85/non_secure/portmacro.h index cfaae813eac..3b51cb5ff46 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif @@ -76,8 +81,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. 
*/ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. 
*/ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. 
*/ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. 
*/ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. 
*/ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. 
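+ *
+ * The register clobbered by the 0xFF000000 probe above is SHPR2, whose
+ * top byte holds the SVCall handler priority, so the saved value is
+ * written back here before the scheduler is started.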
*/ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
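+ *
+ * As a rough worked example: with three implemented priority bits,
+ * xPortStartScheduler() computes ulMaxPRIGROUPValue as ( 7 - 3 ) shifted
+ * into the PRIGROUP position, i.e. 0x400, so the assertion below only
+ * passes while the AIRCR PRIGROUP field is 4 or less and every implemented
+ * priority bit therefore remains a pre-emption priority bit.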
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h index cfaae813eac..3b51cb5ff46 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h @@ -29,11 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif - -#include "portmacrocommon.h" +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -54,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif @@ -76,8 +81,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -43,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -162,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -258,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -266,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -286,28 +301,30 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CRx_No_GIC/portmacro.h b/portable/IAR/ARM_CRx_No_GIC/portmacro.h index fbe4e166a59..82853af3ccf 100644 --- a/portable/IAR/ARM_CRx_No_GIC/portmacro.h +++ b/portable/IAR/ARM_CRx_No_GIC/portmacro.h @@ -31,9 +31,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -173,9 +175,10 @@ warnings. */ #pragma diag_suppress=Pe191 #pragma diag_suppress=Pa082 +/* *INDENT-OFF* */ #ifdef __cplusplus - } /* extern C */ + } #endif - +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ATMega323/portmacro.h b/portable/IAR/ATMega323/portmacro.h index 111f4d02667..69ba2f1d9ac 100644 --- a/portable/IAR/ATMega323/portmacro.h +++ b/portable/IAR/ATMega323/portmacro.h @@ -36,9 +36,11 @@ Changes from V1.2.3 #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -106,8 +108,10 @@ void vPortYield( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/AVR32_UC3/portmacro.h b/portable/IAR/AVR32_UC3/portmacro.h index eb48b59b589..73c206ca5f8 100644 --- a/portable/IAR/AVR32_UC3/portmacro.h +++ b/portable/IAR/AVR32_UC3/portmacro.h @@ -87,9 +87,11 @@ #include "intc.h" #include "compiler.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. */ @@ -679,8 +681,10 @@ extern void *pvPortRealloc( void *pv, size_t xSize ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/AtmelSAM7S64/portmacro.h b/portable/IAR/AtmelSAM7S64/portmacro.h index 173d3fbaa2d..01c6eed6aad 100644 --- a/portable/IAR/AtmelSAM7S64/portmacro.h +++ b/portable/IAR/AtmelSAM7S64/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -105,8 +107,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/AtmelSAM9XE/portmacro.h b/portable/IAR/AtmelSAM9XE/portmacro.h index 3d1d82ec17b..db6f10cca85 100644 --- a/portable/IAR/AtmelSAM9XE/portmacro.h +++ b/portable/IAR/AtmelSAM9XE/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -108,8 +110,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/LPC2000/portmacro.h b/portable/IAR/LPC2000/portmacro.h index f41d8b3445e..8fdf1fe75a3 100644 --- a/portable/IAR/LPC2000/portmacro.h +++ b/portable/IAR/LPC2000/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -107,8 +109,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RISC-V/portmacro.h b/portable/IAR/RISC-V/portmacro.h index 42def7367aa..5dddb454c79 100644 --- a/portable/IAR/RISC-V/portmacro.h +++ b/portable/IAR/RISC-V/portmacro.h @@ -32,9 +32,11 @@ #include "intrinsics.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -176,8 +178,10 @@ extern size_t xCriticalNesting; #error configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS must be defined in FreeRTOSConfig.h. Set them to zero if there is no MTIME (machine time) clock. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RL78/portmacro.h b/portable/IAR/RL78/portmacro.h index 9c74a3f0452..9685b569d5d 100644 --- a/portable/IAR/RL78/portmacro.h +++ b/portable/IAR/RL78/portmacro.h @@ -31,9 +31,11 @@ #ifdef __IAR_SYSTEMS_ICC__ +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -141,9 +143,11 @@ extern volatile uint16_t usCriticalNesting; \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} -#endif /* __cplusplus */ + } +#endif +/* *INDENT-ON* */ #endif /* __IAR_SYSTEMS_ICC__ */ diff --git a/portable/IAR/RX100/portmacro.h b/portable/IAR/RX100/portmacro.h index d408a1389a5..e53c43e4ce5 100644 --- a/portable/IAR/RX100/portmacro.h +++ b/portable/IAR/RX100/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include "machine.h" @@ -145,8 +147,10 @@ undefined - all warnings have been manually checked and are not an issue, and the warnings cannot be prevent by code changes without undesirable effects. */ #pragma diag_suppress=Pa082 +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RX600/portmacro.h b/portable/IAR/RX600/portmacro.h index e5decdaf34a..1b1bec3fc51 100644 --- a/portable/IAR/RX600/portmacro.h +++ b/portable/IAR/RX600/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -134,8 +136,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RX700v3_DPFPU/portmacro.h b/portable/IAR/RX700v3_DPFPU/portmacro.h index e0204fa0c3c..f8fde27908e 100644 --- a/portable/IAR/RX700v3_DPFPU/portmacro.h +++ b/portable/IAR/RX700v3_DPFPU/portmacro.h @@ -33,9 +33,11 @@ /* Hardware specifics. */ #include - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -191,8 +193,10 @@ * the warnings cannot be prevent by code changes without undesirable effects. */ #pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RXv2/portmacro.h b/portable/IAR/RXv2/portmacro.h index 984a4fb50e2..3f84484487d 100644 --- a/portable/IAR/RXv2/portmacro.h +++ b/portable/IAR/RXv2/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -139,8 +141,10 @@ undefined - all warnings have been manually checked and are not an issue, and the warnings cannot be prevent by code changes without undesirable effects. */ #pragma diag_suppress=Pa082 +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/STR71x/portmacro.h b/portable/IAR/STR71x/portmacro.h index 119eec84786..1fbadff3ebe 100644 --- a/portable/IAR/STR71x/portmacro.h +++ b/portable/IAR/STR71x/portmacro.h @@ -42,9 +42,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. */ @@ -115,8 +117,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/STR75x/portmacro.h b/portable/IAR/STR75x/portmacro.h index 674505d3f0c..94a7b72ed0b 100644 --- a/portable/IAR/STR75x/portmacro.h +++ b/portable/IAR/STR75x/portmacro.h @@ -42,9 +42,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. */ #define portCHAR char @@ -106,8 +108,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/STR91x/portmacro.h b/portable/IAR/STR91x/portmacro.h index 43ea6d7e8dc..b5429b57dbc 100644 --- a/portable/IAR/STR91x/portmacro.h +++ b/portable/IAR/STR91x/portmacro.h @@ -42,9 +42,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. 
*/ #define portCHAR char @@ -108,8 +110,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/V850ES/portmacro.h b/portable/IAR/V850ES/portmacro.h index 76b1186761d..cfae9ae7ca9 100644 --- a/portable/IAR/V850ES/portmacro.h +++ b/portable/IAR/V850ES/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -130,8 +132,10 @@ extern void vTaskSwitchContext( void ); #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MPLAB/PIC24_dsPIC/portmacro.h b/portable/MPLAB/PIC24_dsPIC/portmacro.h index 83b695a43bb..0cc26f20633 100644 --- a/portable/MPLAB/PIC24_dsPIC/portmacro.h +++ b/portable/MPLAB/PIC24_dsPIC/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -108,8 +110,10 @@ extern void vPortYield( void ); #define portNOP() asm volatile ( "NOP" ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MPLAB/PIC32MEC14xx/portmacro.h b/portable/MPLAB/PIC32MEC14xx/portmacro.h index d9aad5df0aa..9e00a1c05cc 100644 --- a/portable/MPLAB/PIC32MEC14xx/portmacro.h +++ b/portable/MPLAB/PIC32MEC14xx/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -244,8 +246,10 @@ extern volatile UBaseType_t uxInterruptNesting; #define portREMOVE_STATIC_QUALIFIER #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MPLAB/PIC32MX/portmacro.h b/portable/MPLAB/PIC32MX/portmacro.h index e2a1078e631..e0bf8dbd130 100644 --- a/portable/MPLAB/PIC32MX/portmacro.h +++ b/portable/MPLAB/PIC32MX/portmacro.h @@ -32,9 +32,11 @@ /* System include files */ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -196,8 +198,10 @@ extern volatile UBaseType_t uxInterruptNesting; #define portREMOVE_STATIC_QUALIFIER #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MPLAB/PIC32MZ/portmacro.h b/portable/MPLAB/PIC32MZ/portmacro.h index 17b266e1124..371bcab6f26 100644 --- a/portable/MPLAB/PIC32MZ/portmacro.h +++ b/portable/MPLAB/PIC32MZ/portmacro.h @@ -32,9 +32,11 @@ /* System include files */ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -207,8 +209,10 @@ extern volatile UBaseType_t uxInterruptNesting; #define portREMOVE_STATIC_QUALIFIER #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MikroC/ARM_CM4F/portmacro.h b/portable/MikroC/ARM_CM4F/portmacro.h index fa614c31488..b67a0a9d6a4 100644 --- a/portable/MikroC/ARM_CM4F/portmacro.h +++ b/portable/MikroC/ARM_CM4F/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -185,8 +187,10 @@ } /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Paradigm/Tern_EE/large_untested/portmacro.h b/portable/Paradigm/Tern_EE/large_untested/portmacro.h index ed513309c7c..292c9e260d6 100644 --- a/portable/Paradigm/Tern_EE/large_untested/portmacro.h +++ b/portable/Paradigm/Tern_EE/large_untested/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -100,9 +102,11 @@ typedef unsigned short UBaseType_t; #define portTASK_FUNCTION_PROTO( vTaskFunction, vParameters ) void vTaskFunction( void *pvParameters ) #define portTASK_FUNCTION( vTaskFunction, vParameters ) void vTaskFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Paradigm/Tern_EE/small/portmacro.h b/portable/Paradigm/Tern_EE/small/portmacro.h index 9eb5b863023..ff0b34b2065 100644 --- a/portable/Paradigm/Tern_EE/small/portmacro.h +++ b/portable/Paradigm/Tern_EE/small/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -102,8 +104,10 @@ typedef void ( __interrupt __far *pxISR )(); #define portTASK_FUNCTION_PROTO( vTaskFunction, vParameters ) void vTaskFunction( void *pvParameters ) #define portTASK_FUNCTION( vTaskFunction, vParameters ) void vTaskFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/RVDS/ARM_CA9/portmacro.h b/portable/RVDS/ARM_CA9/portmacro.h index 65dd8174f7c..1351dae9f60 100644 --- a/portable/RVDS/ARM_CA9/portmacro.h +++ b/portable/RVDS/ARM_CA9/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -158,8 +160,10 @@ void vPortTaskUsesFPU( void ); #define portNOP() __nop() +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX100/portmacro.h b/portable/Renesas/RX100/portmacro.h index 9bcbd3c8c3d..b82fdca2440 100644 --- a/portable/Renesas/RX100/portmacro.h +++ b/portable/Renesas/RX100/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include "machine.h" @@ -146,8 +148,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX200/portmacro.h b/portable/Renesas/RX200/portmacro.h index 62a085023e2..55278dacb15 100644 --- a/portable/Renesas/RX200/portmacro.h +++ b/portable/Renesas/RX200/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include "machine.h" @@ -136,8 +138,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX600/portmacro.h b/portable/Renesas/RX600/portmacro.h index 3b29cbddb50..60106ae13e7 100644 --- a/portable/Renesas/RX600/portmacro.h +++ b/portable/Renesas/RX600/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include "machine.h" @@ -137,8 +139,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX600v2/portmacro.h b/portable/Renesas/RX600v2/portmacro.h index d67a8f892aa..6efba647dbd 100644 --- a/portable/Renesas/RX600v2/portmacro.h +++ b/portable/Renesas/RX600v2/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. 
*/ #include "machine.h" @@ -137,8 +139,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX700v3_DPFPU/portmacro.h b/portable/Renesas/RX700v3_DPFPU/portmacro.h index 12657ee82bf..fef13556e0d 100644 --- a/portable/Renesas/RX700v3_DPFPU/portmacro.h +++ b/portable/Renesas/RX700v3_DPFPU/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include @@ -181,8 +183,10 @@ /* Definition to allow compatibility with existing FreeRTOS Demo using flop.c. */ #define portTASK_USES_FLOATING_POINT() vPortTaskUsesDPFPU() - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/SH2A_FPU/portmacro.h b/portable/Renesas/SH2A_FPU/portmacro.h index 422cd59ed48..0ff2e323afa 100644 --- a/portable/Renesas/SH2A_FPU/portmacro.h +++ b/portable/Renesas/SH2A_FPU/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -132,8 +134,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Tasking/ARM_CM4F/portmacro.h b/portable/Tasking/ARM_CM4F/portmacro.h index 3371f34f47a..1c3588b8630 100644 --- a/portable/Tasking/ARM_CM4F/portmacro.h +++ b/portable/Tasking/ARM_CM4F/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -128,8 +130,10 @@ #define portNOP() - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h b/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h index b82e48695d9..2c6979faf53 100644 --- a/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h +++ b/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h @@ -30,10 +30,13 @@ #include extern void vPortYield(void); + +/* *INDENT-OFF* */ #ifdef __cplusplus -class vPortYield; -extern "C" { + class vPortYield; + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- @@ -154,8 +157,10 @@ extern portLONG pendsvflag; +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/ARC_v1/portmacro.h b/portable/ThirdParty/GCC/ARC_v1/portmacro.h index 72fbb49759e..137cbc05d93 100644 --- a/portable/ThirdParty/GCC/ARC_v1/portmacro.h +++ b/portable/ThirdParty/GCC/ARC_v1/portmacro.h @@ -30,9 +30,11 @@ #define PORTMACRO_H #include "embARC.h" - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /* record stack high address for stack check */ #ifndef configRECORD_STACK_HIGH_ADDRESS @@ -143,8 +145,10 @@ void vPortYield( void ); void vPortYieldFromIsr( void ); - #ifdef __cplusplus -} - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/ATmega/portmacro.h b/portable/ThirdParty/GCC/ATmega/portmacro.h index 3e7714eb836..8292f2d7ed9 100644 --- a/portable/ThirdParty/GCC/ATmega/portmacro.h +++ b/portable/ThirdParty/GCC/ATmega/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -152,8 +154,10 @@ extern void vPortYieldFromISR( void ) __attribute__ ( ( naked ) ); #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index d634c8b264e..5ac570d9a59 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -61,6 +61,10 @@ #include #include +#ifdef __APPLE__ + #include +#endif + /* Scheduler includes. 
*/ #include "FreeRTOS.h" #include "task.h" @@ -146,6 +150,11 @@ portSTACK_TYPE * pxPortInitialiseStack( StackType_t * pxTopOfStack, pxTopOfStack = ( portSTACK_TYPE * ) thread - 1; ulStackSize = ( size_t )( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack ); + #ifdef __APPLE__ + pxEndOfStack = mach_vm_round_page ( pxEndOfStack ); + ulStackSize = mach_vm_trunc_page ( ulStackSize ); + #endif + thread->pxCode = pxCode; thread->pvParams = pvParameters; thread->xDying = pdFALSE; diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index 68655861202..a3ab7d32393 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #include @@ -130,8 +132,10 @@ extern unsigned long ulPortGetRunTime( void ); #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() /* no-op */ #define portGET_RUN_TIME_COUNTER_VALUE() ulPortGetRunTime() +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index 6ad352a04ba..cec4f7fedeb 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ #include "pico.h" #include "hardware/sync.h" @@ -257,8 +259,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h index c383188a2a9..35c6f246352 100644 --- a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h +++ b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h @@ -29,9 +29,11 @@ #ifndef RP2040_CONFIG_H #define RP2040_CONFIG_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 means set the exception handlers dynamically on cores * that need them in case the user has set up distinct vector table offsets per core @@ -82,8 +84,10 @@ extern "C" { #define configSMP_SPINLOCK_1 PICO_SPINLOCK_ID_OS2 #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -}; + } #endif +/* *INDENT-ON* */ #endif diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h index 18b47f3154e..738b5a18001 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h @@ -6,15 +6,19 @@ #pragma once +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /** * @brief Set up the SysTick interrupt */ void vPortSetupTimer(void); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_config.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_config.h index cb20b188504..78ef67da888 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_config.h +++ 
b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_config.h @@ -42,9 +42,11 @@ #ifndef XTENSA_CONFIG_H #define XTENSA_CONFIG_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ #include #include @@ -146,9 +148,10 @@ #define XT_STACK_EXTRA ( XT_XTRA_SIZE ) #define XT_STACK_EXTRA_CLIB ( XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE ) - - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* XTENSA_CONFIG_H */ diff --git a/portable/ThirdParty/XCC/Xtensa/portmacro.h b/portable/ThirdParty/XCC/Xtensa/portmacro.h index c81576c9f78..f84e4335e1e 100644 --- a/portable/ThirdParty/XCC/Xtensa/portmacro.h +++ b/portable/ThirdParty/XCC/Xtensa/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #ifndef __ASSEMBLER__ @@ -204,8 +206,10 @@ static inline void vPortCleanUpTcbClib(struct _reent *ptr) #endif // __ASSEMBLER__ +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/XCC/Xtensa/xtensa_config.h b/portable/ThirdParty/XCC/Xtensa/xtensa_config.h index 18f3e82c555..a5efcda5898 100644 --- a/portable/ThirdParty/XCC/Xtensa/xtensa_config.h +++ b/portable/ThirdParty/XCC/Xtensa/xtensa_config.h @@ -39,9 +39,11 @@ #ifndef XTENSA_CONFIG_H #define XTENSA_CONFIG_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #include #include @@ -180,8 +182,10 @@ extern "C" { #define XT_STACK_EXTRA_CLIB (XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* XTENSA_CONFIG_H */ diff --git a/portable/oWatcom/16BitDOS/Flsh186/portmacro.h b/portable/oWatcom/16BitDOS/Flsh186/portmacro.h index a2b4c16a40b..952e3f68021 100644 --- a/portable/oWatcom/16BitDOS/Flsh186/portmacro.h +++ b/portable/oWatcom/16BitDOS/Flsh186/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -105,8 +107,10 @@ void portENABLE_INTERRUPTS( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/oWatcom/16BitDOS/PC/portmacro.h b/portable/oWatcom/16BitDOS/PC/portmacro.h index ab4eea87210..2fb7534117a 100644 --- a/portable/oWatcom/16BitDOS/PC/portmacro.h +++ b/portable/oWatcom/16BitDOS/PC/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -106,9 +108,11 @@ void portENABLE_INTERRUPTS( void ); #define portTASK_FUNCTION_PROTO( vTaskFunction, pvParameters ) void vTaskFunction( void *pvParameters ) #define portTASK_FUNCTION( vTaskFunction, pvParameters ) void vTaskFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/stream_buffer.c b/stream_buffer.c index e3c62ab1da2..890202e601a 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -324,7 +324,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) { - uint8_t * pucAllocatedMemory; + void * pvAllocatedMemory; uint8_t ucFlags; /* In case the stream buffer is going to be used as a message buffer @@ -364,31 +364,31 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, if( xBufferSizeBytes < ( xBufferSizeBytes + 1 + sizeof( StreamBuffer_t ) ) ) { xBufferSizeBytes++; - pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */ + pvAllocatedMemory = pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); } else { - pucAllocatedMemory = NULL; + pvAllocatedMemory = NULL; } - if( pucAllocatedMemory != NULL ) + if( pvAllocatedMemory != NULL ) { - prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ - pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ + prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pvAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ + ( ( uint8_t * ) pvAllocatedMemory ) + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ xBufferSizeBytes, xTriggerLevelBytes, ucFlags, pxSendCompletedCallback, pxReceiveCompletedCallback ); - traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pvAllocatedMemory ), xIsMessageBuffer ); } else { traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); } - return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ + return ( StreamBufferHandle_t ) pvAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ } #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ /*-----------------------------------------------------------*/ diff --git a/tasks.c b/tasks.c index 5d8d3233ee3..0a4a1fe672b 100644 --- a/tasks.c +++ b/tasks.c @@ -451,7 +451,7 @@ const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U; * Updates to uxSchedulerSuspended must be protected by both the task lock and the ISR lock * and must not be done from an ISR. 
Reads must be protected by either lock and may be done * from either an ISR or a task. */ -PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; +PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U; #if ( configGENERATE_RUN_TIME_STATS == 1 ) @@ -3012,7 +3012,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceTASK_RESUME_FROM_ISR( pxTCB ); /* Check the ready lists can be accessed. */ - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { #if ( configNUMBER_OF_CORES == 1 ) { @@ -3511,7 +3511,7 @@ BaseType_t xTaskResumeAll( void ) --uxSchedulerSuspended; portRELEASE_TASK_LOCK(); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) { @@ -4050,7 +4050,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char /* Arrange for xTickCount to reach xNextTaskUnblockTime in * xTaskIncrementTick() when the scheduler resumes. This ensures * that any delayed tasks are resumed at the correct time. */ - configASSERT( uxSchedulerSuspended ); + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); configASSERT( xTicksToJump != ( TickType_t ) 0 ); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ @@ -4079,7 +4079,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) /* Must not be called with the scheduler suspended as the implementation * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */ - configASSERT( uxSchedulerSuspended == 0 ); + configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */ @@ -4208,7 +4208,7 @@ BaseType_t xTaskIncrementTick( void ) * responsibility to increment the tick, or increment the pended ticks if the * scheduler is suspended. If pended ticks is greater than zero, the core that * calls xTaskResumeAll has the responsibility to increment the tick. */ - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { /* Minor optimisation. The tick count cannot change in this * block. */ @@ -4551,7 +4551,7 @@ BaseType_t xTaskIncrementTick( void ) #if ( configNUMBER_OF_CORES == 1 ) void vTaskSwitchContext( void ) { - if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) { /* The scheduler is currently suspended - do not allow a context * switch. */ @@ -4640,7 +4640,7 @@ BaseType_t xTaskIncrementTick( void ) * SMP port. */ configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 ); - if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) { /* The scheduler is currently suspended - do not allow a context * switch. */ @@ -4748,7 +4748,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by * the event groups implementation. */ - configASSERT( uxSchedulerSuspended != 0 ); + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); /* Store the item value in the event list item. 
It is safe to access the * event list item here as interrupts won't access the event list item of a @@ -4823,7 +4823,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) configASSERT( pxUnblockedTCB ); listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); prvAddTaskToReadyList( pxUnblockedTCB ); @@ -4895,7 +4895,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by * the event flags implementation. */ - configASSERT( uxSchedulerSuspended != pdFALSE ); + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); /* Store the new item value in the event list. */ listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); @@ -5890,7 +5890,7 @@ static void prvResetNextTaskUnblockTime( void ) taskENTER_CRITICAL(); #endif { - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { xReturn = taskSCHEDULER_RUNNING; } @@ -7174,7 +7174,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* The task should not have been on an event list. */ configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); @@ -7285,7 +7285,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* The task should not have been on an event list. */ configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); From 56a360e300257ee6e7355244bbfbd545ff59f6a5 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 21 Jul 2023 13:32:00 +0800 Subject: [PATCH 162/164] Merge main to SMP branch (#86) * Fix array-bounds compiler warning on gcc11+ in list.h (#580) listGET_OWNER_OF_NEXT_ENTRY computes `( pxConstList )->pxIndex->pxNext` after verifying that `( pxConstList )->pxIndex` points to `xListEnd`, which due to being a MiniListItem_t, can be shorter than a ListItem_t. Thus, `( pxConstList )->pxIndex` is a `ListItem_t *` that extends past the end of the `List_t` whose `xListEnd` it points to. This is fixed by accessing `pxNext` through a `MiniListItem_t` instead. * move the prototype for vApplicationIdleHook to task.h. (#600) Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update equal priority task preemption (#603) * vTaskResume and vTaskPrioritySet don't preempt equal priority task * Update vTaskResumeAll not to preempt task with equal priority * Fix in xTaskResumeFromISR * Update FreeRTOS/FreeRTOS build checks (#613) This is needed to be compatible with the refactoring done in this PR - https://github.com/FreeRTOS/FreeRTOS/pull/889 Signed-off-by: Gaurav Aggarwal Signed-off-by: Gaurav Aggarwal * Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent (#611) Allow ulTaskGetIdleRunTimeCounter and ulTaskGetIdleRunTimePercent to be used whenever configGENERATE_RUN_TIME_STATS is enabled, as this is the only requirement for these functions to work. 
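A minimal usage sketch (illustrative only - the monitoring task, its name and its one second period are assumptions, not part of the change). It simply reads the idle statistics once configGENERATE_RUN_TIME_STATS is enabled:
```
/* Minimal sketch, assuming configGENERATE_RUN_TIME_STATS is 1 and the run time
 * counter hooks are provided in FreeRTOSConfig.h. */
#include "FreeRTOS.h"
#include "task.h"

static void prvStatsMonitorTask( void * pvParameters )
{
    configRUN_TIME_COUNTER_TYPE ulIdleCount, ulIdlePercent;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Raw idle counter, and idle time as a percentage of total run time. */
        ulIdleCount = ulTaskGetIdleRunTimeCounter();
        ulIdlePercent = ulTaskGetIdleRunTimePercent();

        /* CPU load can be estimated as 100 - ulIdlePercent. */
        ( void ) ulIdleCount;
        ( void ) ulIdlePercent;

        vTaskDelay( pdMS_TO_TICKS( 1000UL ) );
    }
}
```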
* Fix some CMake documentation typos (#616) The quick start instructions for CMake mention the "master" git branch which has been replaced by "main" in the current repo. The main CMakeLists.txt documents how to integrate a custom port. Fix a typo in the suggested CMake code. * Added support of 64bit events. (#597) * Added support of 64bit even Signed-off-by: Cervenka Dusan * Added missing brackets Signed-off-by: Cervenka Dusan * Made proper name for tick macro. Signed-off-by: Cervenka Dusan * Improved macro evaluation Signed-off-by: Cervenka Dusan * Fixed missed port files + documentation Signed-off-by: Cervenka Dusan * Changes made on PR Signed-off-by: Cervenka Dusan * Fix macro definition. Signed-off-by: Cervenka Dusan * Formatted code with uncrustify Signed-off-by: Cervenka Dusan --------- Signed-off-by: Cervenka Dusan * Introduce portMEMORY_BARRIER for Microblaze port. (#621) The introduction of `portMEMORY_BARRIER` will ensure the places in the kernel use a barrier will work. For example, `xTaskResumeAll` has a memory barrier to ensure its correctness when compiled with optimization enabled. Without the barrier `xTaskResumeAll` can fail (e.g. start reading and writing to address 0 and/or infinite looping) when `xPendingReadyList` contains more than one task to restore. In `xTaskResumeAll` the compiler chooses to cache the `pxTCB` the first time through the loop for use in every subsequent loop. This is incorrect as the removal of `pxTCB->xEventListItem` will actually change the value of `pxTCB` if it was read again at the top of the loop. The barrier forces the compiler to read `pxTCB` again at the top of the loop. The compiler is operating correctly. The removal `pxTCB->xEventListItem` executes on a `List_t *` and `ListItem_t *`. This means that the compiler can assume that any `MiniListItem_t` values are unchanged by the loop (i.e. "strict-aliasing"). This allows the compiler to cache `pxTCB` as it is obtained via a `MiniListItem_t`. This is incorrect in this case because it is possible for a `ListItem_t *` to actually alias a `MiniListItem_t`. This is technically a "violation of aliasing rules" so we use the the barrier to disable the strict-aliasing optimization in this loop. * Do not call exit() on MSVC Port when calling vPortEndScheduler (#624) * make port exitable * correctly set xPortRunning to False * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update PR template to include checkbox for Unit Test related changes (#627) * Fix build failure introduced in PR #597 (#629) The PR #597 introduced a new config option configTICK_TYPE_WIDTH_IN_BITS which can be defined to one of the following: * TICK_TYPE_WIDTH_16_BITS - Tick type is 16 bit wide. * TICK_TYPE_WIDTH_32_BITS - Tick type is 32 bit wide. * TICK_TYPE_WIDTH_64_BITS - Tick type is 64 bit wide. Earlier we supported 16 and 32 bit width for tick type which was controlled using the config option configUSE_16_BIT_TICKS. The PR tried to maintain backward compatibility by honoring configUSE_16_BIT_TICKS. The backward compatibility did not work as expected though, as the macro configTICK_TYPE_WIDTH_IN_BITS was used before it was defined. This PR addresses it by ensuring that the macro configTICK_TYPE_WIDTH_IN_BITS is defined before it is used. Testing 1. 
configUSE_16_BIT_TICKS is defined to 0. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 2. configUSE_16_BIT_TICKS is defined to 1. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 3. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_16_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 4. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_32_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 5. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_64_BITS. ``` #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. ``` The testing was done for GCC/ARM_CM3 port which does not support 64 bit tick type. 6. Neither configUSE_16_BIT_TICKS nor configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` 7. Both configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` Related issue - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/628 Signed-off-by: Gaurav Aggarwal * Feature/fixing clang gnu compiler warnings (#620) * Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558) * Using single name definition for libraries everywhere. (#558) * Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571) * Removing compiler warnings for GNU and Clang. (#571) * Added in documentation on how to consume from a main project. Added default PORT selection for native POSIX and MINGW platforms. * Only adding freertos_config if it exists. Removing auto generation of it from a FREERTOS_CONFIG_FILE_DIRECTORY. * Fixing clang and gnu compiler warnings. * Adding in project information and how to compile for GNU/clang * Fixing compiler issue with unused variable - no need to declare variable. * Adding in compile warnings for linux builds that kernel is okay with using. * Fixing more extra-semi-stmt clang warnings. 
* Moving definition of hooks into header files if features are enabled. * Fixing formatting with uncrustify. * Fixing merge conflicts with main merge. * Fixing compiler errors due to merge issues and formatting. * Fixing Line feeds. * Adding 'portNORETURN' into portmacros.h. Other Updates based on PR request * Further clean-up of clang and clang-tidy issues. * Removing compiler specific pragmas from common c files. * Fixing missing lexicon entry and uncrustify formatting changes. * Resolving merge issue multiple defnitions of proto for prvIdleTask * Fixing formatting issues that are not covered by uncrustify. Use clang-tidy instead if you want this level of control. * More uncrustify formatting issues. * Fixing extra bracket in #if statement. --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * POSIX port fixes (#626) * Fix types in POSIX port Use TaskFunction_t and StackType_t as other ports do. * Fix portTICK_RATE_MICROSECONDS in POSIX port --------- Co-authored-by: Jacques GUILLOU Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Cortex-M35P: Add Cortex-M35P port (#631) * Cortex-M35P: Add Cortex-M35P port The Cortex-M35P support added to kernel. The port hasn't been validated yet with TF-M. Hence TF-M support is not included in this port. Signed-off-by: Devaraj Ranganna * Add portNORETURN to the newly added portmacro.h Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Introduced Github Status Badge for Unit Tests (#634) * Introduced Github Status Badge for Unit Tests * Github status badge to point to latest run * Github status badge UT points to latest results * Fixed URL for Github Status badge --------- Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Remove C99 requirement from CMake file (#633) * Remove C99 requirement from CMake file The kernel source is C89 compliant and does not need C99. Signed-off-by: Gaurav Aggarwal * Explicitly set C89 requirement for kernel Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add Thread Local Storage (TLS) support using Picolibc functions (#343) * Pass top of stack to configINIT_TLS_BLOCK Picolibc wants to allocate the per-task TLS block within the stack segment, so it will need to modify the top of stack value. Pass the pxTopOfStack variable to make this explicit. Signed-off-by: Keith Packard * Move newlib-specific definitions to separate file This reduces the clutter in FreeRTOS.h caused by having newlib-specific macros present there. Signed-off-by: Keith Packard * Make TLS code depend only on configUSE_C_RUNTIME_TLS_SUPPORT Remove reference to configUSE_NEWLIB_REENTRANT as that only works when using newlib. configUSE_C_RUNTIME_TLS_SUPPORT is always set when configUSE_NEWLIB_REENTRANT is set, so using both was redundant in that case. Signed-off-by: Keith Packard * portable-ARC: Adapt ARC support to use generalized TLS support With generalized thread local storage (TLS) support present in the core, the two ARC ports need to have the changes to the TCB mirrored to them. Signed-off-by: Keith Packard * Add Thread Local Storage (TLS) support using Picolibc functions This patch provides definitions of the general TLS support macros in terms of the Picolibc TLS support functions. 
Picolibc is normally configured to use TLS internally for all variables that are intended to be task-local, so these changes are necessary for picolibc to work correctly with FreeRTOS. The picolibc helper functions rely on elements within the linker script to arrange the TLS data in memory and define some symbols. Applications wanting to use this mechanism will need changes in their linker script when migrating to picolibc. Signed-off-by: Keith Packard --------- Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Interrupt priority assert improvements for CM3/4/7 (#602) * Interrupt priority assert improvements for CM3/4/7 In the ARM_CM3, ARM_CM4, and ARM_CM7 ports, change the assertion that `configMAX_SYSCALL_INTERRUPT_PRIORITY` is nonzero to account for the number of priority bits implemented by the hardware. Change these ports to also use the lowest priority for PendSV and SysTick, ignoring `configKERNEL_INTERRUPT_PRIORITY`. * Remove not needed configKERNEL_INTERRUPT_PRIORITY define Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Introduced code coverage status badge (#635) * Introduced code coverage status badge * Trying to fix the URL checker issue * Fix URL check Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * added portPOINTER_SIZE_TYPE and SIZE_MAX definition to PIC24/dsPIC port (#636) * added portPOINTER_SIZE_TYPE definition to PIC24/dsPIC port * Added SIZE_MAX definition to PIC24/dsPIC33 * Fix TLS and stack alignment when using picolibc (#637) Both the TLS block and stack must be correctly aligned when using picolibc. The architecture stack alignment is represented by the portBYTE_ALIGNMENT_MASK and the TLS block alignment is provided by the Picolibc _tls_align() inline function for Picolibc version 1.8 and above. For older versions of Picolibc, we'll assume that the TLS block requires the same alignment as the stack. For downward growing stacks, this requires aligning the start of the TLS block to the maximum of the stack alignment and the TLS alignment. With this, both the TLS block and stack will now be correctly aligned. For upward growing stacks, the two areas must be aligned independently; the TLS block is aligned from the start of the stack, then the tls space is allocated, and then the stack is aligned above that. It's probably useful to know here that the linker ensures that variables within the TLS block are assigned offsets that match their alignment requirements. If the TLS block itself is correctly aligned, then everything within will also be. I have only tested the downward growing stack branch of this patch. Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Enable building the GCC Cortex-R5 port without an FPU (#586) * Ensure configUSE_TASK_FPU_SUPPORT option is set correctly If one does enable the FPU of the Cortex-R5 processor, then the GCC compiler will define the macro __ARM_FP. This can be used to ensure, that the configUSE_TASK_FPU_SUPPORT is set accordingly. 
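A hedged sketch of that check (illustrative only, not the literal port source) - the option can be defaulted from the compiler-defined macro:
```
/* Illustrative sketch: derive a default for configUSE_TASK_FPU_SUPPORT from
 * __ARM_FP, which GCC defines only when the Cortex-R5 FPU is enabled. */
#ifndef configUSE_TASK_FPU_SUPPORT
    #ifdef __ARM_FP
        #define configUSE_TASK_FPU_SUPPORT    1    /* FPU enabled - keep the floating point context support. */
    #else
        #define configUSE_TASK_FPU_SUPPORT    0    /* No FPU - build the port without FPU handling. */
    #endif
#endif
```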
* Enable the implementation of vPortTaskUsesFPU only if configUSE_TASK_FPU_SUPPORT is set to 1 * Remove error case in pxPortInitialiseStack The case of configUSE_TASK_FPU_SUPPORT is 0 is now handled * Enable access to FPU registers only if FPU is enabled * Make minor formating changes * Format ARM Cortex-R5 port * Address review comments from @ChristosZosi * Minor code review suggestions Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Christos Zosimidis Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Fix freertos_kernel cmake property, Posix Port (#640) * Fix freertos_kernel cmake property, Posix Port * Moves the `set_property()` call below the target definition in top level CMakeLists file * Corrects billion value from `ULL` suffix (not C90 compliant) to `UL` suffix with cast to uint64_t * Add blank line to CMakeLists.txt * Add missing FreeRTOS+ defines * Run kernel demos and unit tests for PR changes (#645) * Run kernel demos and unit tests for PR changes Kernel demos check builds multiple demos from FreeRTOS/FreeRTOS and unit tests check runs unit tests in FreeRTOS/FreeRTOS. Both of these checks currently use main branch of FreeRTOS-Kernel. This commits updates these checks to use the changes in the PR. Signed-off-by: Gaurav Aggarwal * Do not specify PR SHA explicitly as that is default Signed-off-by: Gaurav Aggarwal * Remove explicit PR SHA from kernel checks Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add functions to get the buffers of statically created objects (#641) Added various ...GetStaticBuffer() functions to get the buffers of statically created objects. --------- Co-authored-by: Paul Bartell Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Cortex-M Assert when NVIC implements 8 PRIO bits (#639) * Cortex-M Assert when NVIC implements 8 PRIO bits * Fix CM3 ports * Fix ARM_CM3_MPU * Fix ARM CM3 * Fix ARM_CM4_MPU * Fix ARM_CM4 * Fix GCC ARM_CM7 * Fix IAR ARM ports * Uncrustify changes * Fix MikroC_ARM_CM4F port * Fix MikroC_ARM_CM4F port-(2) * Fix RVDS ARM ports * Revert changes for Tasking/ARM_CM4F port * Revert changes for Tasking/ARM_CM4F port-(2) * Update port.c Fix GCC/ARM_CM4F port * Update port.c * update GCC\ARM_CM4F port * update port.c * Assert to check configMAX_SYSCALL_INTERRUPT_PRIORITY is set to higher priority * Fix merge error: remove duplicate code * Fix typos --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Ubuntu * Remove C90 requirement from CMakeLists (#649) This is needed as it is breaking projects - https://forums.freertos.org/t/freertos-gcc-cmake/16984 We will re-evaluate and accordingly add this later. Signed-off-by: Gaurav Aggarwal * Only add alignment padding when needed (#650) Heap 4 and Heap 5 add some padding to ensure that the allocated blocks are always aligned to portBYTE_ALIGNMENT bytes. The code until now was adding padding always even if the resulting block was already aligned. This commits updates the code to only add padding if the resulting block is not aligned. 
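The idea can be sketched as follows (the helper name is hypothetical and the code is illustrative, not the exact heap_4/heap_5 source):
```
/* Illustrative sketch: pad a requested block size up to a portBYTE_ALIGNMENT
 * boundary only when it is misaligned. */
#include <stddef.h>
#include "FreeRTOS.h"

static size_t prvPadToAlignment( size_t xWantedSize )
{
    if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
    {
        /* Not aligned - add just enough padding to reach the next boundary. */
        xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
    }

    /* Already aligned sizes now pass through unchanged - previously padding
     * was added unconditionally. */
    return xWantedSize;
}
```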
Signed-off-by: Gaurav Aggarwal * add a missing comma (#651) * fix conversion warning (#658) FreeRTOS-Kernel/portable/GCC/ARM_CM4F/port.c:399:41: error: conversion from 'uint32_t' {aka 'long unsigned int'} to 'uint8_t' {aka 'unsigned char'} may change value [-Werror=conversion] Signed-off-by: Vo Trung Chi * ARMv7M: Adjust implemented priority bit assertions (#665) Adjust assertions related to the CMSIS __NVIC_PRIO_BITS and FreeRTOS configPRIO_BITS configuration macros such that these macros specify the minimum number of implemented priority bits supported by a config build rather than the exact number of implemented priority bits. Related to Qemu issue #1122 * Format portmacro.h in arm CM0 ports * portable/ARM_CM0: Add xPortIsInsideInterrupt Add missing xPortIsInsideInterrupt function to Cortex-M0 port. * tree-wide: Unify formatting of __cplusplus ifdefs * Paranthesize expression-like macro (#668) * Updated tasks.c checks for scheduler suspension (#670) This commit updates the checks for the variable uxSchedulerSuspended in tasks.c module to use a uniform format. Signed-off-by: Sudeep Mohanty * Fix cast alignment warning (#669) * Fix cast alignment warning Without this change, the code produces the following warning when compiled with `-Wcast-align` flag: ``` cast increases required alignment of target type ``` Signed-off-by: Gaurav Aggarwal * Align StackSize and StackAddress for macOS (#674) * Armv8-M (except Cortex-M23) interrupt priority checking (#673) * Armv8-M: Formatting changes Signed-off-by: Devaraj Ranganna * Armv8-M: Add support for interrupt priority check FreeRTOS provides `FromISR` system calls which can be called directly from interrupt service routines. It is crucial that the priority of these ISRs is set to same or lower value (numerically higher) than that of `configMAX_SYSCALL_INTERRUPT_PRIORITY`. For more information refer to https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html. Add a check to trigger an assert when an ISR with priority higher (numerically lower) than `configMAX_SYSCALL_INTERRUPT_PRIORITY` calls `FromISR` system calls if `configASSERT` macro is defined. In addition, add a config option `configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK` to disable interrupt priority check while running on QEMU. Based on the discussion https://gitlab.com/qemu-project/qemu/-/issues/1122, The interrupt priority bits in QEMU do not match the real hardware. Therefore the assert that checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. The config option `configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the `FreeRTOSConfig.h` for QEMU targets. Signed-off-by: Devaraj Ranganna * Use SHPR2 for calculating interrupt priority bits This removes the dependency on the secure software to mark the interrupt as non-secure. Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Use the extended movx instruction instead of mov (#676) The following is from the MSP430X instruction set - ``` MOVX.W Move source word to destination word. The source operand is copied to the destination. The source operand is not affected. Both operands may be located in the full address space. ``` The movx instruction allows both the operands to be located in the full address space and therefore, works with large data model as well. 
Signed-off-by: Gaurav Aggarwal * Fix eTaskGetState for pending ready tasks (#679) This commit fixes eTaskGetState so that eReady is returned for pending ready tasks. Co-authored-by: Darian Leung * Generates SBOM after source files are updated with release tag (#680) * update source file with release version info before SBOM generation * delete tag branch during cleanup * Add back croutines by reverting PR#590 (#685) * Add croutines to the code base * Add croutine changes to cmake, lexicon and readme * Add croutine file to portable cmake file * Add back more references from PR 591 * Remove __NVIC_PRIO_BITS and configPRIO_BITS check in port (#683) * Remove __NVIC_PRIO_BITS and configPRIO_BITS check in CM3, CM4 and ARMv8. * Add hardware not implemented bits check. These bits should be zero. --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Use UBaseType_t as interrupt mask (#689) * Use UBaseType_t as interrupt mask * Update GCC posix port to use UBaseType_t as interrupt mask * Fix clang warning in croutine and stream buffer (#686) * Fix document warning in croutine * Fix cast-qual warning in stream buffer * Use portTASK_FUNCTION_PROTO to replace portNORETURN (#688) * Use portTASK_FUNCTION_PROTO to replace portNORETURN * Fix typo in check comment of configMAX_SYSCALL_INTERRUPT_PRIORITY (#690) * Add constant type for portMAX_DELAY in port (#691) Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update static stream buffer size check (#693) * Use volatile size instead of sizeof directly to prevent always true/false warning * Fix typos in comments for the AT91SAM7S port (#695) Co-authored-by: RichardBarry <3073890+RichardBarry@users.noreply.github.com> * Fix #697: Missing portPOINTER_SIZE_TYPE definition for ATmega port (#698) * Remove empty expression statement compiler warning (#692) * Add do while( 0 ) loop for empty expression statement compiler warning * Update uxTaskGetSystemState for tasks in pending ready list (#702) * Update uxTaskGetSystemState to sync with eTaskGetState * Update in vTaskGetInfo for tasks in pending ready list should be in ready state. * Fix circular dependency in CMake project (#700) * Fix circular dependency in cmake project Fix for https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/687 In order for custom ports to also break the cycle, they must link against freertos_kernel_include instead of freertos_kernel. * Simplify include path * Memory Protection Unit (MPU) Enhancements (#705) Memory Protection Unit (MPU) Enhancements This commit introduces a new MPU wrapper that places additional restrictions on unprivileged tasks. The following is the list of changes introduced with the new MPU wrapper: 1. Opaque and indirectly verifiable integers for kernel object handles: All the kernel object handles (for example, queue handles) are now opaque integers. Previously object handles were raw pointers. 2. Saving the task context in Task Control Block (TCB): When a task is swapped out by the scheduler, the task's context is now saved in its TCB. Previously the task's context was saved on its stack. 3. Execute system calls on a separate privileged only stack: FreeRTOS system calls, which execute with elevated privilege, now use a separate privileged only stack. Previously system calls used the calling task's stack. The application writer can control the size of the system call stack using new configSYSTEM_CALL_STACK_SIZE config macro. 4. 
Memory bounds checks: FreeRTOS system calls which accept a pointer and de-reference it, now verify that the calling task has required permissions to access the memory location referenced by the pointer. 5. System call restrictions: The following system calls are no longer available to unprivileged tasks: - vQueueDelete - xQueueCreateMutex - xQueueCreateMutexStatic - xQueueCreateCountingSemaphore - xQueueCreateCountingSemaphoreStatic - xQueueGenericCreate - xQueueGenericCreateStatic - xQueueCreateSet - xQueueRemoveFromSet - xQueueGenericReset - xTaskCreate - xTaskCreateStatic - vTaskDelete - vTaskPrioritySet - vTaskSuspendAll - xTaskResumeAll - xTaskGetHandle - xTaskCallApplicationTaskHook - vTaskList - vTaskGetRunTimeStats - xTaskCatchUpTicks - xEventGroupCreate - xEventGroupCreateStatic - vEventGroupDelete - xStreamBufferGenericCreate - xStreamBufferGenericCreateStatic - vStreamBufferDelete - xStreamBufferReset Also, an unprivileged task can no longer use vTaskSuspend to suspend any task other than itself. We thank the following people for their inputs in these enhancements: - David Reiss of Meta Platforms, Inc. - Lan Luo, Xinhui Shao, Yumeng Wei, Zixia Liu, Huaiyu Yan and Zhen Ling of School of Computer Science and Engineering, Southeast University, China. - Xinwen Fu of Department of Computer Science, University of Massachusetts Lowell, USA. - Yuequi Chen, Zicheng Wang, Minghao Lin of University of Colorado Boulder, USA. * Update History for Version 10.6.0 (#706) Signed-off-by: kar-rahul-aws * Fixed compile options polluting project (#694) * Fixed compile options polluting project Moved add_library higher * Apply suggestions from code review Co-authored-by: Paul Bartell * fixed cmakelists keeping in mind the suggestions --------- Co-authored-by: Paul Bartell Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> * Fix the comments in the CM3 and CM4 MPU Ports about the MPU Region numbers being loaded (#707) Co-authored-by: Soren Ptak Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update xSemaphoreGetStaticBuffer prototype in comment (#704) Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Correct the misspelled name (#708) Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Signed-off-by: Cervenka Dusan Signed-off-by: Devaraj Ranganna Signed-off-by: Keith Packard Signed-off-by: Vo Trung Chi Signed-off-by: Sudeep Mohanty Signed-off-by: kar-rahul-aws Co-authored-by: Archit Gupta <71798289+archigup@users.noreply.github.com> Co-authored-by: tcpluess Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Chris Copeland Co-authored-by: David J. 
Fiddes <35607151+davefiddes@users.noreply.github.com> Co-authored-by: Dusan Cervenka Co-authored-by: bbain <16752579+bbain@users.noreply.github.com> Co-authored-by: Ju1He1 <93189163+Ju1He1@users.noreply.github.com> Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> Co-authored-by: phelter Co-authored-by: jacky309 Co-authored-by: Jacques GUILLOU Co-authored-by: Devaraj Ranganna Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Joseph Julicher Co-authored-by: Paul Bartell Co-authored-by: Christos Zosimidis Co-authored-by: Kody Stribrny <89810515+kstribrnAmzn@users.noreply.github.com> Co-authored-by: Holden Co-authored-by: Darian <32921628+Dazza0@users.noreply.github.com> Co-authored-by: Ubuntu Co-authored-by: Nicolas Co-authored-by: Vo Trung Chi Co-authored-by: Sudeep Mohanty <91244425+sudeep-mohanty@users.noreply.github.com> Co-authored-by: Monika Singh <108652024+moninom1@users.noreply.github.com> Co-authored-by: Darian Leung Co-authored-by: Tony Josi Co-authored-by: Evgeny Ermakov <22344340+unspecd@users.noreply.github.com> Co-authored-by: RichardBarry <3073890+RichardBarry@users.noreply.github.com> Co-authored-by: Joris Putcuyps Co-authored-by: Patrick Cook <114708437+cookpate@users.noreply.github.com> Co-authored-by: Mr. Jake Co-authored-by: Paul Bartell Co-authored-by: Soren Ptak Co-authored-by: Soren Ptak --- .github/lexicon.txt | 27 +- .github/workflows/auto-release.yml | 24 +- CMakeLists.txt | 19 +- History.txt | 111 + README.md | 3 +- croutine.c | 363 ++ event_groups.c | 6 +- include/CMakeLists.txt | 15 + include/FreeRTOS.h | 20 +- include/croutine.h | 755 +++ include/mpu_prototypes.h | 241 +- include/mpu_wrappers.h | 232 +- include/portable.h | 24 +- include/queue.h | 24 +- include/semphr.h | 7 +- include/stack_macros.h | 12 +- include/task.h | 18 +- portable/ARMv8M/copy_files.py | 12 + portable/ARMv8M/non_secure/port.c | 759 ++- .../GCC/ARM_CM23/mpu_wrappers_v2_asm.c | 2419 ++++++++++ .../portable/GCC/ARM_CM23/portasm.c | 558 ++- .../portable/GCC/ARM_CM23/portmacro.h | 1 - .../GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c | 2419 ++++++++++ .../portable/GCC/ARM_CM23_NTZ/portasm.c | 481 +- .../portable/GCC/ARM_CM23_NTZ/portmacro.h | 1 - .../GCC/ARM_CM33/mpu_wrappers_v2_asm.c | 2349 ++++++++++ .../portable/GCC/ARM_CM33/portasm.c | 528 ++- .../portable/GCC/ARM_CM33/portmacro.h | 1 - .../GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c | 2349 ++++++++++ .../portable/GCC/ARM_CM33_NTZ/portasm.c | 412 +- .../portable/GCC/ARM_CM33_NTZ/portmacro.h | 1 - .../portable/GCC/ARM_CM35P/portmacro.h | 1 - .../portable/GCC/ARM_CM55/portmacro.h | 1 - .../portable/GCC/ARM_CM85/portmacro.h | 1 - .../IAR/ARM_CM23/mpu_wrappers_v2_asm.S | 1623 +++++++ .../portable/IAR/ARM_CM23/portasm.s | 425 +- .../IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S | 1623 +++++++ .../portable/IAR/ARM_CM23_NTZ/portasm.s | 374 +- .../IAR/ARM_CM33/mpu_wrappers_v2_asm.S | 1552 +++++++ .../portable/IAR/ARM_CM33/portasm.s | 359 +- .../IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S | 1552 +++++++ .../portable/IAR/ARM_CM33_NTZ/portasm.s | 302 +- portable/ARMv8M/non_secure/portmacrocommon.h | 146 +- portable/CCS/ARM_CM3/port.c | 26 +- portable/CCS/ARM_CM4F/port.c | 26 +- portable/CMakeLists.txt | 2 +- portable/Common/mpu_wrappers.c | 4 +- portable/Common/mpu_wrappers_v2.c | 4121 +++++++++++++++++ 
portable/GCC/ARM7_AT91SAM7S/port.c | 2 +- portable/GCC/ARM_CM0/portmacro.h | 1 - .../ARM_CM23/non_secure/mpu_wrappers_v2_asm.c | 2419 ++++++++++ portable/GCC/ARM_CM23/non_secure/port.c | 759 ++- portable/GCC/ARM_CM23/non_secure/portasm.c | 558 ++- portable/GCC/ARM_CM23/non_secure/portmacro.h | 1 - .../GCC/ARM_CM23/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2419 ++++++++++ portable/GCC/ARM_CM23_NTZ/non_secure/port.c | 759 ++- .../GCC/ARM_CM23_NTZ/non_secure/portasm.c | 481 +- .../GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 1 - .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 146 +- portable/GCC/ARM_CM3/port.c | 26 +- portable/GCC/ARM_CM3/portmacro.h | 1 - .../ARM_CM33/non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM33/non_secure/port.c | 759 ++- portable/GCC/ARM_CM33/non_secure/portasm.c | 528 ++- portable/GCC/ARM_CM33/non_secure/portmacro.h | 1 - .../GCC/ARM_CM33/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM33_NTZ/non_secure/port.c | 759 ++- .../GCC/ARM_CM33_NTZ/non_secure/portasm.c | 412 +- .../GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 1 - .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM35P/non_secure/port.c | 759 ++- portable/GCC/ARM_CM35P/non_secure/portasm.c | 528 ++- portable/GCC/ARM_CM35P/non_secure/portmacro.h | 1 - .../ARM_CM35P/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM35P_NTZ/non_secure/port.c | 759 ++- .../GCC/ARM_CM35P_NTZ/non_secure/portasm.c | 412 +- .../GCC/ARM_CM35P_NTZ/non_secure/portmacro.h | 1 - .../non_secure/portmacrocommon.h | 146 +- .../GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM3_MPU/port.c | 694 ++- portable/GCC/ARM_CM3_MPU/portmacro.h | 57 +- portable/GCC/ARM_CM4F/port.c | 26 +- portable/GCC/ARM_CM4F/portmacro.h | 1 - .../GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM4_MPU/port.c | 820 +++- portable/GCC/ARM_CM4_MPU/portmacro.h | 56 +- .../ARM_CM55/non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM55/non_secure/port.c | 759 ++- portable/GCC/ARM_CM55/non_secure/portasm.c | 528 ++- portable/GCC/ARM_CM55/non_secure/portmacro.h | 1 - .../GCC/ARM_CM55/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM55_NTZ/non_secure/port.c | 759 ++- .../GCC/ARM_CM55_NTZ/non_secure/portasm.c | 412 +- .../GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 1 - .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 146 +- portable/GCC/ARM_CM7/r0p1/port.c | 26 +- portable/GCC/ARM_CM7/r0p1/portmacro.h | 1 - .../ARM_CM85/non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM85/non_secure/port.c | 759 ++- portable/GCC/ARM_CM85/non_secure/portasm.c | 528 ++- portable/GCC/ARM_CM85/non_secure/portmacro.h | 1 - .../GCC/ARM_CM85/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM85_NTZ/non_secure/port.c | 759 ++- .../GCC/ARM_CM85_NTZ/non_secure/portasm.c | 412 +- .../GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 1 - .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 146 +- portable/GCC/AVR32_UC3/portmacro.h | 2 +- portable/GCC/CORTUS_APS3/portmacro.h | 2 +- portable/GCC/ColdFire_V2/portmacro.h | 2 +- portable/GCC/H8S2329/portmacro.h | 2 +- portable/GCC/HCS12/portmacro.h | 2 +- portable/GCC/MSP430F449/portmacro.h | 2 +- 
portable/GCC/PPC405_Xilinx/portmacro.h | 2 +- portable/GCC/PPC440_Xilinx/portmacro.h | 2 +- portable/GCC/RL78/portmacro.h | 2 +- portable/GCC/STR75x/portmacro.h | 2 +- portable/IAR/78K0R/portmacro.h | 2 +- .../ARM_CM23/non_secure/mpu_wrappers_v2_asm.S | 1623 +++++++ portable/IAR/ARM_CM23/non_secure/port.c | 759 ++- portable/IAR/ARM_CM23/non_secure/portasm.s | 425 +- .../IAR/ARM_CM23/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1623 +++++++ portable/IAR/ARM_CM23_NTZ/non_secure/port.c | 759 ++- .../IAR/ARM_CM23_NTZ/non_secure/portasm.s | 374 +- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 146 +- portable/IAR/ARM_CM3/port.c | 26 +- .../ARM_CM33/non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM33/non_secure/port.c | 759 ++- portable/IAR/ARM_CM33/non_secure/portasm.s | 359 +- .../IAR/ARM_CM33/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM33_NTZ/non_secure/port.c | 759 ++- .../IAR/ARM_CM33_NTZ/non_secure/portasm.s | 302 +- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM35P/non_secure/port.c | 759 ++- portable/IAR/ARM_CM35P/non_secure/portasm.s | 359 +- .../ARM_CM35P/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM35P_NTZ/non_secure/port.c | 759 ++- .../IAR/ARM_CM35P_NTZ/non_secure/portasm.s | 302 +- .../non_secure/portmacrocommon.h | 146 +- portable/IAR/ARM_CM4F/port.c | 26 +- .../IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S | 1556 +++++++ portable/IAR/ARM_CM4F_MPU/port.c | 545 ++- portable/IAR/ARM_CM4F_MPU/portasm.s | 281 +- portable/IAR/ARM_CM4F_MPU/portmacro.h | 55 +- .../ARM_CM55/non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM55/non_secure/port.c | 759 ++- portable/IAR/ARM_CM55/non_secure/portasm.s | 359 +- .../IAR/ARM_CM55/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM55_NTZ/non_secure/port.c | 759 ++- .../IAR/ARM_CM55_NTZ/non_secure/portasm.s | 302 +- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 146 +- portable/IAR/ARM_CM7/r0p1/port.c | 26 +- .../ARM_CM85/non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM85/non_secure/port.c | 759 ++- portable/IAR/ARM_CM85/non_secure/portasm.s | 359 +- .../IAR/ARM_CM85/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM85_NTZ/non_secure/port.c | 759 ++- .../IAR/ARM_CM85_NTZ/non_secure/portasm.s | 302 +- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 146 +- portable/IAR/ATMega323/portmacro.h | 2 +- portable/IAR/AVR32_UC3/portmacro.h | 2 +- portable/IAR/AVR_AVRDx/portmacro.h | 2 +- portable/IAR/AVR_Mega0/portmacro.h | 2 +- portable/IAR/AtmelSAM7S64/port.c | 2 +- portable/IAR/AtmelSAM7S64/portmacro.h | 2 +- portable/IAR/AtmelSAM9XE/portmacro.h | 2 +- portable/IAR/LPC2000/portmacro.h | 2 +- portable/IAR/MSP430/portmacro.h | 2 +- portable/IAR/MSP430X/portmacro.h | 2 +- portable/IAR/RL78/portmacro.h | 2 +- portable/MikroC/ARM_CM4F/port.c | 26 +- portable/RVDS/ARM_CM3/port.c | 26 +- portable/RVDS/ARM_CM4F/port.c | 26 +- .../RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c | 1993 ++++++++ portable/RVDS/ARM_CM4_MPU/port.c | 782 +++- portable/RVDS/ARM_CM4_MPU/portmacro.h | 55 +- portable/RVDS/ARM_CM7/r0p1/port.c | 26 +- portable/ThirdParty/GCC/ATmega/portmacro.h | 2 + portable/ThirdParty/GCC/Posix/port.c | 8 +- portable/ThirdParty/GCC/Posix/portmacro.h | 8 +- 
.../ThirdParty/GCC/RP2040/include/portmacro.h | 5 +- portable/ThirdParty/GCC/RP2040/library.cmake | 1 + .../GCC/Xtensa_ESP32/include/portmacro.h | 2 - .../ThirdParty/xClang/XCOREAI/portmacro.h | 4 +- portable/WizC/PIC18/portmacro.h | 2 +- queue.c | 321 +- stream_buffer.c | 40 +- tasks.c | 120 +- timers.c | 4 +- 200 files changed, 96733 insertions(+), 7659 deletions(-) create mode 100644 croutine.c create mode 100644 include/CMakeLists.txt create mode 100644 include/croutine.h create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S create mode 100644 portable/Common/mpu_wrappers_v2.c create mode 100644 portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c mode change 100755 => 100644 portable/GCC/ARM_CM3/port.c create mode 100644 portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c mode change 100755 => 100644 portable/GCC/ARM_CM3_MPU/port.c mode change 100755 => 100644 portable/GCC/ARM_CM4F/port.c create mode 100644 portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c mode change 100755 => 100644 portable/GCC/ARM_CM4_MPU/port.c create mode 100644 portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 90452c54c56..853c73f220e 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1,3 +1,4 @@ + GNU nano 5.9 .github/lexicon.txt Modified aa aaaa aarch @@ -317,6 +318,7 @@ coproc coprocessor 
coprocessors coreid +coroutinehandle coverity covfs cp @@ -344,10 +346,16 @@ cpu cr crc crcb +crcoroutine +crdelay creadonlyarray creadwritearray createevent +crend crgint +croutine +crqueue +crstart crt crtv crxedchar @@ -1526,18 +1534,21 @@ prvcheckforrunstatechange prvcheckinterfaces prvchecktaskswaitingtermination prvcopydatatoqueue +prvcoroutineflashtask +prvcoroutineflashworktask prvdeletetcb prvexitfunction prvgettimens prvheapinit prvidletask +prvinitialisecoroutinelists prvinitialisemutex prvinitialisenewstreambuffer prvinitialisenewtimer prvinsertblockintofreelist prvlockqueue -prvnotifyqueuesetcontainer prvminimalidletask +prvnotifyqueuesetcontainer prvportmalloc prvportresetpic prvprocesssimulatedinterrupts @@ -1647,11 +1658,15 @@ pxblocktoinsert pxcallbackfunction pxcode pxcontainer +pxcoroutinecode +pxcoroutinewoken pxcrcb pxcreatedtask +pxcurrentcoroutine pxcurrenttcb pxcurrenttcbconst pxcurrenttimerlist +pxdelayedcoroutinelist pxdelayedtasklist pxend pxendofstack @@ -1686,6 +1701,7 @@ pxnextfreeblock pxnexttcb pxoriginalsp pxoriginaltos +pxoverflowdelayedcoroutinelist pxoverflowdelayedtasklist pxowner pxportinitialisestack @@ -1695,6 +1711,7 @@ pxqueue pxqueuebuffer pxqueuesetcontainer pxramstack +pxreadycoroutinelists pxreadytaskslists pxreceivecompletedcallback pxregions @@ -2444,6 +2461,7 @@ uxprevschedulersuspended uxpriority uxprioritytouse uxqueue +uxqueuegetqueueitemsize uxqueuelength uxqueuemessageswaiting uxqueuespacesavailable @@ -2474,6 +2492,7 @@ uxtopreadypriority uxtopusedpriority uxvariabletoincrement uxwantedbytes +vacoroutine vadifferenttask vafunction val @@ -2504,6 +2523,7 @@ vbr vbufferisr vcallbackfunction vclearinterruptmask +vcoroutineschedule vddcore vec vectactive @@ -2513,6 +2533,7 @@ ver veventgroupclearbitscallback veventgroupdelete veventgroupsetbitscallback +vflashcoroutine vfp vfunction vic @@ -2575,12 +2596,14 @@ vqueuedelete vqueueunregisterqueue vr vraiseprivilege +vreceivingcoroutine vreg vresetprivilege vrestorecontextoffirsttask vrpm vsemaphorecreatebinary vsemaphoredelete +vsendingcoroutine vsetbacklightstate vsoftwareinterruptentry vstartfirsttask @@ -2734,6 +2757,7 @@ xdd xdddd xdeadbeef xdelay +xdelayedcoroutinelist xdelayedtasklist xdelaytime xe @@ -2888,6 +2912,7 @@ xpar xparameters xpendedcounts xpendedticks +xpendingreadycoroutinelist xpendingreadylist xperiod xportgetcoreid diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index 3bf820a6f9c..426d8f0c01a 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -25,7 +25,6 @@ jobs: - name: Tool Setup uses: actions/setup-python@v2 with: - python-version: 3.7.10 architecture: x64 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -51,7 +50,18 @@ jobs: - name: create a new branch that references commit id working-directory: ./local_kernel - run: git checkout -b ${{ github.event.inputs.version_number }} ${{ github.event.inputs.commit_id }} + run: | + git checkout -b ${{ github.event.inputs.version_number }} ${{ github.event.inputs.commit_id }} + echo "COMMIT_SHA_1=$(git rev-parse HEAD)" >> $GITHUB_ENV + + - name: Update source files with version info + run: | + # Install deps and run + pip install -r ./tools/.github/scripts/release-requirements.txt + ./tools/.github/scripts/update_src_version.py FreeRTOS --kernel-repo-path=local_kernel --kernel-commit=${{ env.COMMIT_SHA_1 }} --new-kernel-version=${{ github.event.inputs.version_number }} --new-kernel-main-br-version=${{ github.event.inputs.main_br_version }} + exit $? 
+ env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Generate SBOM uses: FreeRTOS/CI-CD-Github-Actions/sbom-generator@main @@ -65,13 +75,19 @@ jobs: git add . git commit -m 'Update SBOM' git push -u origin ${{ github.event.inputs.version_number }} - echo "COMMIT_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV + echo "COMMIT_SHA_2=$(git rev-parse HEAD)" >> $GITHUB_ENV - name: Release run: | # Install deps and run pip install -r ./tools/.github/scripts/release-requirements.txt - ./tools/.github/scripts/release.py FreeRTOS --kernel-repo-path=local_kernel --kernel-commit=${{ env.COMMIT_SHA }} --new-kernel-version=${{ github.event.inputs.version_number }} --new-kernel-main-br-version=${{ github.event.inputs.main_br_version }} + ./tools/.github/scripts/release.py FreeRTOS --kernel-repo-path=local_kernel --kernel-commit=${{ env.COMMIT_SHA_2 }} --new-kernel-version=${{ github.event.inputs.version_number }} --new-kernel-main-br-version=${{ github.event.inputs.main_br_version }} exit $? env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Cleanup + working-directory: ./local_kernel + run: | + # Delete the branch created for Tag by SBOM generator + git push -u origin --delete ${{ github.event.inputs.version_number }} diff --git a/CMakeLists.txt b/CMakeLists.txt index d45de64b1b5..c57d464028d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -225,9 +225,12 @@ elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_ " .)\n" " target_link_libraries(freertos_kernel_port\n" " PRIVATE\n" - " freertos_kernel)") + " freertos_kernel_include)") endif() + +add_library(freertos_kernel STATIC) + ######################################################################## # Overall Compile Options # Note the compile option strategy is to error on everything and then @@ -248,7 +251,7 @@ endif() # MSVC | MSVC # Note only for MinGW? # Renesas | ?TBD? -add_compile_options( +target_compile_options(freertos_kernel PRIVATE ### Gnu/Clang C Options $<$:-fdiagnostics-color=always> $<$:-fcolor-diagnostics> @@ -264,9 +267,11 @@ add_compile_options( ######################################################################## +add_subdirectory(include) add_subdirectory(portable) -add_library(freertos_kernel STATIC +target_sources(freertos_kernel PRIVATE + croutine.c event_groups.c list.c queue.c @@ -278,17 +283,11 @@ add_library(freertos_kernel STATIC $>,${FREERTOS_HEAP},portable/MemMang/heap_${FREERTOS_HEAP}.c> ) -target_include_directories(freertos_kernel - PUBLIC - include - # Note: DEPRECATED but still supported, may be removed in a future release. - $<$>:${FREERTOS_CONFIG_FILE_DIRECTORY}> -) target_link_libraries(freertos_kernel PUBLIC - $<$:freertos_config> freertos_kernel_port + freertos_kernel_include ) ######################################################################## diff --git a/History.txt b/History.txt index 94b74e80363..98f9822d454 100644 --- a/History.txt +++ b/History.txt @@ -1,5 +1,116 @@ Documentation and download available at https://www.FreeRTOS.org/ +Changes between FreeRTOS V10.5.1 and FreeRTOS 10.6.0 released July 13, 2023 + + + Add a new MPU wrapper that places additional restrictions on unprivileged + tasks. The following is the list of changes introduced with the new MPU + wrapper: + + 1. Opaque and indirectly verifiable integers for kernel object handles: + All the kernel object handles (for example, queue handles) are now + opaque integers. Previously object handles were raw pointers. + 2. 
Save the task context in Task Control Block (TCB): When a task is + swapped out by the scheduler, the task's context is now saved in its + TCB. Previously the task's context was saved on its stack. + 3. Execute system calls on a separate privileged only stack: FreeRTOS + system calls, which execute with elevated privilege, now use a + separate privileged only stack. Previously system calls used the + calling task's stack. The application writer can control the size of + the system call stack using new configSYSTEM_CALL_STACK_SIZE config + macro. + 4. Memory bounds checks: FreeRTOS system calls which accept a pointer + and de-reference it, now verify that the calling task has required + permissions to access the memory location referenced by the pointer. + 5. System calls restrictions: The following system calls are no longer + available to unprivileged tasks: + - vQueueDelete + - xQueueCreateMutex + - xQueueCreateMutexStatic + - xQueueCreateCountingSemaphore + - xQueueCreateCountingSemaphoreStatic + - xQueueGenericCreate + - xQueueGenericCreateStatic + - xQueueCreateSet + - xQueueRemoveFromSet + - xQueueGenericReset + - xTaskCreate + - xTaskCreateStatic + - vTaskDelete + - vTaskPrioritySet + - vTaskSuspendAll + - xTaskResumeAll + - xTaskGetHandle + - xTaskCallApplicationTaskHook + - vTaskList + - vTaskGetRunTimeStats + - xTaskCatchUpTicks + - xEventGroupCreate + - xEventGroupCreateStatic + - vEventGroupDelete + - xStreamBufferGenericCreate + - xStreamBufferGenericCreateStatic + - vStreamBufferDelete + - xStreamBufferReset + Also, an unprivileged task can no longer use vTaskSuspend to suspend + any task other than itself. + + We thank the following people for their inputs in these enhancements: + - David Reiss of Meta Platforms, Inc. + - Lan Luo, Xinhui Shao, Yumeng Wei, Zixia Liu, Huaiyu Yan and Zhen Ling + of School of Computer Science and Engineering, Southeast University, + China. + - Xinwen Fu of Department of Computer Science, University of + Massachusetts Lowell, USA. + - Yueqi Chen, Zicheng Wang, Minghao Lin of University of Colorado + Boulder, USA. + + Add Cortex-M35P port. Contributed by @urutva. + + Add embedded extension (RV32E) support to the IAR RISC-V port. + + Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent APIs. Contributed by + @chrisnc. + + Add APIs to get the application supplied buffers from statically + created kernel objects. The following new APIs are added: + - xTaskGetStaticBuffers + - xQueueGetStaticBuffers + - xQueueGenericGetStaticBuffers + - xSemaphoreGetStaticBuffer + - xEventGroupGetStaticBuffer + - xStreamBufferGetStaticBuffers + - xMessageBufferGetStaticBuffers + These APIs enable the application writer to obtain static buffers from + the kernel object and free/reuse them at the time of deletion. Earlier + the application writer had to maintain the association of static buffers + and the kernel object in the application. Contributed by @Dazza0. + + Add Thread Local Storage (TLS) support using picolibc function. Contributed + by @keith-packard. + + Add configTICK_TYPE_WIDTH_IN_BITS to configure TickType_t data type. As a result, + the number of bits in an event group also increases with big data type. Contributed + by @Hadatko. + + Update eTaskGetState and uxTaskGetSystemState to return eReady for pending ready + tasks. Contributed by @Dazza0. + + Update heap_4 and heap_5 to add padding only if the resulting block is not + already aligned. 
+ + Fix the scheduler logic in a couple of places to not preempt a task when an + equal priority task becomes ready. + + Add macros used in FreeRTOS-Plus libraries. Contributed by @Holden. + + Fix clang compiler warnings. Contributed by @phelter. + + Add assertions to ARMv8-M ports to detect when FreeRTOS APIs are called from + interrupts with priority higher than the configMAX_SYSCALL_INTERRUPT_PRIORITY. + Contributed by @urutva. + + Add xPortIsInsideInterrupt API to ARM_CM0 ports. + + Fix build warning in MSP430X port when large data model is used. + + Add the ability to use Cortex-R5 port on the parts without FPU. + + Fix build warning in heap implementations on PIC24/dsPIC. + + Update interrupt priority asserts for Cortex-M ports so that these do not fire + on QEMU which does not implement PRIO bits. + + Update ARMv7-M ports to ensure that kernel interrupts run at the lowest priority. + configKERNEL_INTERRUPT_PRIORITY is now obsolete for ARMv7-M ports and brings + these ports inline with the newer ARMv8-M ports. Contributed by @chrisnc. + + Fix build issue in POSIX GCC port on Windows Subsystem for Linux (WSL). Contributed + by @jacky309. + + Add portMEMORY_BARRIER to Microblaze port. Contributed by @bbain. + + Add portPOINTER_SIZE_TYPE definition for ATmega port. Contributed by @jputcu. + + Multiple improvements in the CMake support. Contributed by @phelte and @cookpate. + Changes between FreeRTOS V10.5.0 and FreeRTOS V10.5.1 released November 16 2022 + Updated the kernel version in manifest and SBOM diff --git a/README.md b/README.md index 952914daf46..dd79eee6ce7 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,8 @@ git clone git@github.com:FreeRTOS/FreeRTOS-Kernel.git ## Repository structure - The root of this repository contains the three files that are common to every port - list.c, queue.c and tasks.c. The kernel is contained within these -three files. +three files. croutine.c implements the optional co-routine functionality - which +is normally only used on very memory limited systems. - The ```./portable``` directory contains the files that are specific to a particular microcontroller and/or compiler. See the readme file in the ```./portable``` directory for more information. diff --git a/croutine.c b/croutine.c new file mode 100644 index 00000000000..f38e96247a3 --- /dev/null +++ b/croutine.c @@ -0,0 +1,363 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#include "FreeRTOS.h" +#include "task.h" +#include "croutine.h" + +/* Remove the whole file is co-routines are not being used. */ +#if ( configUSE_CO_ROUTINES != 0 ) + +/* + * Some kernel aware debuggers require data to be viewed to be global, rather + * than file scope. + */ + #ifdef portREMOVE_STATIC_QUALIFIER + #define static + #endif + + +/* Lists for ready and blocked co-routines. --------------------*/ + static List_t pxReadyCoRoutineLists[ configMAX_CO_ROUTINE_PRIORITIES ]; /*< Prioritised ready co-routines. */ + static List_t xDelayedCoRoutineList1; /*< Delayed co-routines. */ + static List_t xDelayedCoRoutineList2; /*< Delayed co-routines (two lists are used - one for delays that have overflowed the current tick count. */ + static List_t * pxDelayedCoRoutineList = NULL; /*< Points to the delayed co-routine list currently being used. */ + static List_t * pxOverflowDelayedCoRoutineList = NULL; /*< Points to the delayed co-routine list currently being used to hold co-routines that have overflowed the current tick count. */ + static List_t xPendingReadyCoRoutineList; /*< Holds co-routines that have been readied by an external event. They cannot be added directly to the ready lists as the ready lists cannot be accessed by interrupts. */ + +/* Other file private variables. --------------------------------*/ + CRCB_t * pxCurrentCoRoutine = NULL; + static UBaseType_t uxTopCoRoutineReadyPriority = 0; + static TickType_t xCoRoutineTickCount = 0, xLastTickCount = 0, xPassedTicks = 0; + +/* The initial state of the co-routine when it is created. */ + #define corINITIAL_STATE ( 0 ) + +/* + * Place the co-routine represented by pxCRCB into the appropriate ready queue + * for the priority. It is inserted at the end of the list. + * + * This macro accesses the co-routine ready lists and therefore must not be + * used from within an ISR. + */ + #define prvAddCoRoutineToReadyQueue( pxCRCB ) \ + do { \ + if( ( pxCRCB )->uxPriority > uxTopCoRoutineReadyPriority ) \ + { \ + uxTopCoRoutineReadyPriority = ( pxCRCB )->uxPriority; \ + } \ + vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ ( pxCRCB )->uxPriority ] ), &( ( pxCRCB )->xGenericListItem ) ); \ + } while( 0 ) + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first co-routine. + */ + static void prvInitialiseCoRoutineLists( void ); + +/* + * Co-routines that are readied by an interrupt cannot be placed directly into + * the ready lists (there is no mutual exclusion). Instead they are placed in + * in the pending ready list in order that they can later be moved to the ready + * list by the co-routine scheduler. + */ + static void prvCheckPendingReadyList( void ); + +/* + * Macro that looks at the list of co-routines that are currently delayed to + * see if any require waking. + * + * Co-routines are stored in the queue in the order of their wake time - + * meaning once one co-routine has been found whose timer has not expired + * we need not look any further down the list. 
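+ *
+ * Two delayed lists are maintained - when the co-routine tick count
+ * overflows the lists are swapped, so wake times that wrapped past zero are
+ * still processed in order (see the xCoRoutineTickCount == 0 handling in the
+ * implementation below).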
+ */ + static void prvCheckDelayedList( void ); + +/*-----------------------------------------------------------*/ + + BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, + UBaseType_t uxPriority, + UBaseType_t uxIndex ) + { + BaseType_t xReturn; + CRCB_t * pxCoRoutine; + + /* Allocate the memory that will store the co-routine control block. */ + pxCoRoutine = ( CRCB_t * ) pvPortMalloc( sizeof( CRCB_t ) ); + + if( pxCoRoutine ) + { + /* If pxCurrentCoRoutine is NULL then this is the first co-routine to + * be created and the co-routine data structures need initialising. */ + if( pxCurrentCoRoutine == NULL ) + { + pxCurrentCoRoutine = pxCoRoutine; + prvInitialiseCoRoutineLists(); + } + + /* Check the priority is within limits. */ + if( uxPriority >= configMAX_CO_ROUTINE_PRIORITIES ) + { + uxPriority = configMAX_CO_ROUTINE_PRIORITIES - 1; + } + + /* Fill out the co-routine control block from the function parameters. */ + pxCoRoutine->uxState = corINITIAL_STATE; + pxCoRoutine->uxPriority = uxPriority; + pxCoRoutine->uxIndex = uxIndex; + pxCoRoutine->pxCoRoutineFunction = pxCoRoutineCode; + + /* Initialise all the other co-routine control block parameters. */ + vListInitialiseItem( &( pxCoRoutine->xGenericListItem ) ); + vListInitialiseItem( &( pxCoRoutine->xEventListItem ) ); + + /* Set the co-routine control block as a link back from the ListItem_t. + * This is so we can get back to the containing CRCB from a generic item + * in a list. */ + listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xGenericListItem ), pxCoRoutine ); + listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xEventListItem ), pxCoRoutine ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxCoRoutine->xEventListItem ), ( ( TickType_t ) configMAX_CO_ROUTINE_PRIORITIES - ( TickType_t ) uxPriority ) ); + + /* Now the co-routine has been initialised it can be added to the ready + * list at the correct priority. */ + prvAddCoRoutineToReadyQueue( pxCoRoutine ); + + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, + List_t * pxEventList ) + { + TickType_t xTimeToWake; + + /* Calculate the time to wake - this may overflow but this is + * not a problem. */ + xTimeToWake = xCoRoutineTickCount + xTicksToDelay; + + /* We must remove ourselves from the ready list before adding + * ourselves to the blocked list as the same list item is used for + * both lists. */ + ( void ) uxListRemove( ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentCoRoutine->xGenericListItem ), xTimeToWake ); + + if( xTimeToWake < xCoRoutineTickCount ) + { + /* Wake time has overflowed. Place this item in the + * overflow list. */ + vListInsert( ( List_t * ) pxOverflowDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + } + else + { + /* The wake time has not overflowed, so we can use the + * current block list. */ + vListInsert( ( List_t * ) pxDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + } + + if( pxEventList ) + { + /* Also add the co-routine to an event list. If this is done then the + * function must be called with interrupts disabled. 
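+ * This is how the crQUEUE_SEND and crQUEUE_RECEIVE macros block a
+ * co-routine on a queue - the queue implementation passes one of its event
+ * lists in pxEventList.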
*/ + vListInsert( pxEventList, &( pxCurrentCoRoutine->xEventListItem ) ); + } + } +/*-----------------------------------------------------------*/ + + static void prvCheckPendingReadyList( void ) + { + /* Are there any co-routines waiting to get moved to the ready list? These + * are co-routines that have been readied by an ISR. The ISR cannot access + * the ready lists itself. */ + while( listLIST_IS_EMPTY( &xPendingReadyCoRoutineList ) == pdFALSE ) + { + CRCB_t * pxUnblockedCRCB; + + /* The pending ready list can be accessed by an ISR. */ + portDISABLE_INTERRUPTS(); + { + pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyCoRoutineList ) ); + ( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) ); + } + portENABLE_INTERRUPTS(); + + ( void ) uxListRemove( &( pxUnblockedCRCB->xGenericListItem ) ); + prvAddCoRoutineToReadyQueue( pxUnblockedCRCB ); + } + } +/*-----------------------------------------------------------*/ + + static void prvCheckDelayedList( void ) + { + CRCB_t * pxCRCB; + + xPassedTicks = xTaskGetTickCount() - xLastTickCount; + + while( xPassedTicks ) + { + xCoRoutineTickCount++; + xPassedTicks--; + + /* If the tick count has overflowed we need to swap the ready lists. */ + if( xCoRoutineTickCount == 0 ) + { + List_t * pxTemp; + + /* Tick count has overflowed so we need to swap the delay lists. If there are + * any items in pxDelayedCoRoutineList here then there is an error! */ + pxTemp = pxDelayedCoRoutineList; + pxDelayedCoRoutineList = pxOverflowDelayedCoRoutineList; + pxOverflowDelayedCoRoutineList = pxTemp; + } + + /* See if this tick has made a timeout expire. */ + while( listLIST_IS_EMPTY( pxDelayedCoRoutineList ) == pdFALSE ) + { + pxCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedCoRoutineList ); + + if( xCoRoutineTickCount < listGET_LIST_ITEM_VALUE( &( pxCRCB->xGenericListItem ) ) ) + { + /* Timeout not yet expired. */ + break; + } + + portDISABLE_INTERRUPTS(); + { + /* The event could have occurred just before this critical + * section. If this is the case then the generic list item will + * have been moved to the pending ready list and the following + * line is still valid. Also the pvContainer parameter will have + * been set to NULL so the following lines are also valid. */ + ( void ) uxListRemove( &( pxCRCB->xGenericListItem ) ); + + /* Is the co-routine waiting on an event also? */ + if( pxCRCB->xEventListItem.pxContainer ) + { + ( void ) uxListRemove( &( pxCRCB->xEventListItem ) ); + } + } + portENABLE_INTERRUPTS(); + + prvAddCoRoutineToReadyQueue( pxCRCB ); + } + } + + xLastTickCount = xCoRoutineTickCount; + } +/*-----------------------------------------------------------*/ + + void vCoRoutineSchedule( void ) + { + /* Only run a co-routine after prvInitialiseCoRoutineLists() has been + * called. prvInitialiseCoRoutineLists() is called automatically when a + * co-routine is created. */ + if( pxDelayedCoRoutineList != NULL ) + { + /* See if any co-routines readied by events need moving to the ready lists. */ + prvCheckPendingReadyList(); + + /* See if any delayed co-routines have timed out. */ + prvCheckDelayedList(); + + /* Find the highest priority queue that contains ready co-routines. */ + while( listLIST_IS_EMPTY( &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ) ) + { + if( uxTopCoRoutineReadyPriority == 0 ) + { + /* No more co-routines to check. 
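+ * uxTopCoRoutineReadyPriority is only raised again when
+ * prvAddCoRoutineToReadyQueue() readies a co-routine of a higher priority.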
*/ + return; + } + + --uxTopCoRoutineReadyPriority; + } + + /* listGET_OWNER_OF_NEXT_ENTRY walks through the list, so the co-routines + * of the same priority get an equal share of the processor time. */ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentCoRoutine, &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ); + + /* Call the co-routine. */ + ( pxCurrentCoRoutine->pxCoRoutineFunction )( pxCurrentCoRoutine, pxCurrentCoRoutine->uxIndex ); + } + } +/*-----------------------------------------------------------*/ + + static void prvInitialiseCoRoutineLists( void ) + { + UBaseType_t uxPriority; + + for( uxPriority = 0; uxPriority < configMAX_CO_ROUTINE_PRIORITIES; uxPriority++ ) + { + vListInitialise( ( List_t * ) &( pxReadyCoRoutineLists[ uxPriority ] ) ); + } + + vListInitialise( ( List_t * ) &xDelayedCoRoutineList1 ); + vListInitialise( ( List_t * ) &xDelayedCoRoutineList2 ); + vListInitialise( ( List_t * ) &xPendingReadyCoRoutineList ); + + /* Start with pxDelayedCoRoutineList using list1 and the + * pxOverflowDelayedCoRoutineList using list2. */ + pxDelayedCoRoutineList = &xDelayedCoRoutineList1; + pxOverflowDelayedCoRoutineList = &xDelayedCoRoutineList2; + } +/*-----------------------------------------------------------*/ + + BaseType_t xCoRoutineRemoveFromEventList( const List_t * pxEventList ) + { + CRCB_t * pxUnblockedCRCB; + BaseType_t xReturn; + + /* This function is called from within an interrupt. It can only access + * event lists and the pending ready list. This function assumes that a + * check has already been made to ensure pxEventList is not empty. */ + pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); + ( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) ); + vListInsertEnd( ( List_t * ) &( xPendingReadyCoRoutineList ), &( pxUnblockedCRCB->xEventListItem ) ); + + if( pxUnblockedCRCB->uxPriority >= pxCurrentCoRoutine->uxPriority ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES == 0 */ diff --git a/event_groups.c b/event_groups.c index 3143c220fe5..ea4c79f4b1e 100644 --- a/event_groups.c +++ b/event_groups.c @@ -537,15 +537,15 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) { - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { uxReturn = pxEventBits->uxEventBits; } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt new file mode 100644 index 00000000000..46a1c3e7f5f --- /dev/null +++ b/include/CMakeLists.txt @@ -0,0 +1,15 @@ +# FreeRTOS internal cmake file. Do not use it in user top-level project + +add_library(freertos_kernel_include INTERFACE) + +target_include_directories(freertos_kernel_include + INTERFACE + . + # Note: DEPRECATED but still supported, may be removed in a future release. 
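+ # Prefer defining an INTERFACE library named freertos_config (linked
+ # below) over setting FREERTOS_CONFIG_FILE_DIRECTORY.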
+ $<$>:${FREERTOS_CONFIG_FILE_DIRECTORY}> +) + +target_link_libraries(freertos_kernel_include + INTERFACE + $<$:freertos_config> +) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 809f5a9c0bf..a9fa182934a 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -81,6 +81,11 @@ #endif #endif +/* Set configUSE_MPU_WRAPPERS_V1 to 1 to use MPU wrappers v1. */ +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + /* Basic FreeRTOS definitions. */ #include "projdefs.h" @@ -175,6 +180,10 @@ #error Macro configTICK_TYPE_WIDTH_IN_BITS is defined to incorrect value. See the Configuration section of the FreeRTOS API documentation for details. #endif +#ifndef configUSE_CO_ROUTINES + #define configUSE_CO_ROUTINES 0 +#endif + #ifndef INCLUDE_vTaskPrioritySet #define INCLUDE_vTaskPrioritySet 0 #endif @@ -269,8 +278,10 @@ #define INCLUDE_xTaskGetCurrentTaskHandle 1 #endif -#if ( defined( configUSE_CO_ROUTINES ) && configUSE_CO_ROUTINES != 0 ) - #warning Co-routines have been removed from FreeRTOS-Kernel versions released after V10.5.1. You can view previous versions of the FreeRTOS Kernel at github.com/freertos/freertos-kernel/tree/V10.5.1 . +#if configUSE_CO_ROUTINES != 0 + #ifndef configMAX_CO_ROUTINE_PRIORITIES + #error configMAX_CO_ROUTINE_PRIORITIES must be greater than or equal to 1. + #endif #endif #ifndef configUSE_DAEMON_TASK_STARTUP_HOOK @@ -1011,10 +1022,6 @@ #define portDONT_DISCARD #endif -#ifndef portNORETURN - #define portNORETURN -#endif - #ifndef configUSE_TIME_SLICING #define configUSE_TIME_SLICING 1 #endif @@ -1216,6 +1223,7 @@ #define xTaskParameters TaskParameters_t #define xTaskStatusType TaskStatus_t #define xTimerHandle TimerHandle_t + #define xCoRoutineHandle CoRoutineHandle_t #define pdTASK_HOOK_CODE TaskHookFunction_t #define portTICK_RATE_MS portTICK_PERIOD_MS #define pcTaskGetTaskName pcTaskGetName diff --git a/include/croutine.h b/include/croutine.h new file mode 100644 index 00000000000..664e38b3764 --- /dev/null +++ b/include/croutine.h @@ -0,0 +1,755 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef CO_ROUTINE_H +#define CO_ROUTINE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include croutine.h" +#endif + +#include "list.h" + +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + +/* Used to hide the implementation of the co-routine control block. The + * control block structure however has to be included in the header due to + * the macro implementation of the co-routine functionality. */ +typedef void * CoRoutineHandle_t; + +/* Defines the prototype to which co-routine functions must conform. */ +typedef void (* crCOROUTINE_CODE)( CoRoutineHandle_t, + UBaseType_t ); + +typedef struct corCoRoutineControlBlock +{ + crCOROUTINE_CODE pxCoRoutineFunction; + ListItem_t xGenericListItem; /**< List item used to place the CRCB in ready and blocked queues. */ + ListItem_t xEventListItem; /**< List item used to place the CRCB in event lists. */ + UBaseType_t uxPriority; /**< The priority of the co-routine in relation to other co-routines. */ + UBaseType_t uxIndex; /**< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ + uint16_t uxState; /**< Used internally by the co-routine implementation. */ +} CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */ + +/** + * croutine. h + * @code{c} + * BaseType_t xCoRoutineCreate( + * crCOROUTINE_CODE pxCoRoutineCode, + * UBaseType_t uxPriority, + * UBaseType_t uxIndex + * ); + * @endcode + * + * Create a new co-routine and add it to the list of co-routines that are + * ready to run. + * + * @param pxCoRoutineCode Pointer to the co-routine function. Co-routine + * functions require special syntax - see the co-routine section of the WEB + * documentation for more information. + * + * @param uxPriority The priority with respect to other co-routines at which + * the co-routine will run. + * + * @param uxIndex Used to distinguish between different co-routines that + * execute the same function. See the example below and the co-routine section + * of the WEB documentation for further information. + * + * @return pdPASS if the co-routine was successfully created and added to a ready + * list, otherwise an error code defined with ProjDefs.h. + * + * Example usage: + * @code{c} + * // Co-routine to be created. + * void vFlashCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * // This may not be necessary for const variables. + * static const char cLedToFlash[ 2 ] = { 5, 6 }; + * static const TickType_t uxFlashRates[ 2 ] = { 200, 400 }; + * + * // Must start every co-routine with a call to crSTART(); + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // This co-routine just delays for a fixed period, then toggles + * // an LED. Two co-routines are created using this function, so + * // the uxIndex parameter is used to tell the co-routine which + * // LED to flash and how int32_t to delay. This assumes xQueue has + * // already been created. + * vParTestToggleLED( cLedToFlash[ uxIndex ] ); + * crDELAY( xHandle, uxFlashRates[ uxIndex ] ); + * } + * + * // Must end every co-routine with a call to crEND(); + * crEND(); + * } + * + * // Function that creates two co-routines. 
+ * void vOtherFunction( void ) + * { + * uint8_t ucParameterToPass; + * TaskHandle_t xHandle; + * + * // Create two co-routines at priority 0. The first is given index 0 + * // so (from the code above) toggles LED 5 every 200 ticks. The second + * // is given index 1 so toggles LED 6 every 400 ticks. + * for( uxIndex = 0; uxIndex < 2; uxIndex++ ) + * { + * xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex ); + * } + * } + * @endcode + * \defgroup xCoRoutineCreate xCoRoutineCreate + * \ingroup Tasks + */ +BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, + UBaseType_t uxPriority, + UBaseType_t uxIndex ); + + +/** + * croutine. h + * @code{c} + * void vCoRoutineSchedule( void ); + * @endcode + * + * Run a co-routine. + * + * vCoRoutineSchedule() executes the highest priority co-routine that is able + * to run. The co-routine will execute until it either blocks, yields or is + * preempted by a task. Co-routines execute cooperatively so one + * co-routine cannot be preempted by another, but can be preempted by a task. + * + * If an application comprises of both tasks and co-routines then + * vCoRoutineSchedule should be called from the idle task (in an idle task + * hook). + * + * Example usage: + * @code{c} + * // This idle task hook will schedule a co-routine each time it is called. + * // The rest of the idle task will execute between co-routine calls. + * void vApplicationIdleHook( void ) + * { + * vCoRoutineSchedule(); + * } + * + * // Alternatively, if you do not require any other part of the idle task to + * // execute, the idle task hook can call vCoRoutineSchedule() within an + * // infinite loop. + * void vApplicationIdleHook( void ) + * { + * for( ;; ) + * { + * vCoRoutineSchedule(); + * } + * } + * @endcode + * \defgroup vCoRoutineSchedule vCoRoutineSchedule + * \ingroup Tasks + */ +void vCoRoutineSchedule( void ); + +/** + * croutine. h + * @code{c} + * crSTART( CoRoutineHandle_t xHandle ); + * @endcode + * + * This macro MUST always be called at the start of a co-routine function. + * + * Example usage: + * @code{c} + * // Co-routine to be created. + * void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * static int32_t ulAVariable; + * + * // Must start every co-routine with a call to crSTART(); + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Co-routine functionality goes here. + * } + * + * // Must end every co-routine with a call to crEND(); + * crEND(); + * } + * @endcode + * \defgroup crSTART crSTART + * \ingroup Tasks + */ +#define crSTART( pxCRCB ) \ + switch( ( ( CRCB_t * ) ( pxCRCB ) )->uxState ) { \ + case 0: + +/** + * croutine. h + * @code{c} + * crEND(); + * @endcode + * + * This macro MUST always be called at the end of a co-routine function. + * + * Example usage: + * @code{c} + * // Co-routine to be created. + * void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * static int32_t ulAVariable; + * + * // Must start every co-routine with a call to crSTART(); + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Co-routine functionality goes here. 
+ * } + * + * // Must end every co-routine with a call to crEND(); + * crEND(); + * } + * @endcode + * \defgroup crSTART crSTART + * \ingroup Tasks + */ +#define crEND() } + +/* + * These macros are intended for internal use by the co-routine implementation + * only. The macros should not be used directly by application writers. + */ +#define crSET_STATE0( xHandle ) \ + ( ( CRCB_t * ) ( xHandle ) )->uxState = ( __LINE__ * 2 ); return; \ + case ( __LINE__ * 2 ): +#define crSET_STATE1( xHandle ) \ + ( ( CRCB_t * ) ( xHandle ) )->uxState = ( ( __LINE__ * 2 ) + 1 ); return; \ + case ( ( __LINE__ * 2 ) + 1 ): + +/** + * croutine. h + * @code{c} + * crDELAY( CoRoutineHandle_t xHandle, TickType_t xTicksToDelay ); + * @endcode + * + * Delay a co-routine for a fixed period of time. + * + * crDELAY can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * @param xHandle The handle of the co-routine to delay. This is the xHandle + * parameter of the co-routine function. + * + * @param xTickToDelay The number of ticks that the co-routine should delay + * for. The actual amount of time this equates to is defined by + * configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant portTICK_PERIOD_MS + * can be used to convert ticks to milliseconds. + * + * Example usage: + * @code{c} + * // Co-routine to be created. + * void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * // This may not be necessary for const variables. + * // We are to delay for 200ms. + * static const xTickType xDelayTime = 200 / portTICK_PERIOD_MS; + * + * // Must start every co-routine with a call to crSTART(); + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Delay for 200ms. + * crDELAY( xHandle, xDelayTime ); + * + * // Do something here. + * } + * + * // Must end every co-routine with a call to crEND(); + * crEND(); + * } + * @endcode + * \defgroup crDELAY crDELAY + * \ingroup Tasks + */ +#define crDELAY( xHandle, xTicksToDelay ) \ + do { \ + if( ( xTicksToDelay ) > 0 ) \ + { \ + vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \ + } \ + crSET_STATE0( ( xHandle ) ); \ + } while( 0 ) + +/** + * @code{c} + * crQUEUE_SEND( + * CoRoutineHandle_t xHandle, + * QueueHandle_t pxQueue, + * void *pvItemToQueue, + * TickType_t xTicksToWait, + * BaseType_t *pxResult + * ) + * @endcode + * + * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine + * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks. + * + * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas + * xQueueSend() and xQueueReceive() can only be used from tasks. + * + * crQUEUE_SEND can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xHandle The handle of the calling co-routine. This is the xHandle + * parameter of the co-routine function. + * + * @param pxQueue The handle of the queue on which the data will be posted. + * The handle is obtained as the return value when the queue is created using + * the xQueueCreate() API function. 
+ * + * @param pvItemToQueue A pointer to the data being posted onto the queue. + * The number of bytes of each queued item is specified when the queue is + * created. This number of bytes is copied from pvItemToQueue into the queue + * itself. + * + * @param xTickToDelay The number of ticks that the co-routine should block + * to wait for space to become available on the queue, should space not be + * available immediately. The actual amount of time this equates to is defined + * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant + * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see example + * below). + * + * @param pxResult The variable pointed to by pxResult will be set to pdPASS if + * data was successfully posted onto the queue, otherwise it will be set to an + * error defined within ProjDefs.h. + * + * Example usage: + * @code{c} + * // Co-routine function that blocks for a fixed period then posts a number onto + * // a queue. + * static void prvCoRoutineFlashTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * static BaseType_t xNumberToPost = 0; + * static BaseType_t xResult; + * + * // Co-routines must begin with a call to crSTART(). + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // This assumes the queue has already been created. + * crQUEUE_SEND( xHandle, xCoRoutineQueue, &xNumberToPost, NO_DELAY, &xResult ); + * + * if( xResult != pdPASS ) + * { + * // The message was not posted! + * } + * + * // Increment the number to be posted onto the queue. + * xNumberToPost++; + * + * // Delay for 100 ticks. + * crDELAY( xHandle, 100 ); + * } + * + * // Co-routines must end with a call to crEND(). + * crEND(); + * } + * @endcode + * \defgroup crQUEUE_SEND crQUEUE_SEND + * \ingroup Tasks + */ +#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \ + do { \ + *( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), ( xTicksToWait ) ); \ + if( *( pxResult ) == errQUEUE_BLOCKED ) \ + { \ + crSET_STATE0( ( xHandle ) ); \ + *pxResult = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), 0 ); \ + } \ + if( *pxResult == errQUEUE_YIELD ) \ + { \ + crSET_STATE1( ( xHandle ) ); \ + *pxResult = pdPASS; \ + } \ + } while( 0 ) + +/** + * croutine. h + * @code{c} + * crQUEUE_RECEIVE( + * CoRoutineHandle_t xHandle, + * QueueHandle_t pxQueue, + * void *pvBuffer, + * TickType_t xTicksToWait, + * BaseType_t *pxResult + * ) + * @endcode + * + * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine + * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks. + * + * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas + * xQueueSend() and xQueueReceive() can only be used from tasks. + * + * crQUEUE_RECEIVE can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xHandle The handle of the calling co-routine. This is the xHandle + * parameter of the co-routine function. + * + * @param pxQueue The handle of the queue from which the data will be received. + * The handle is obtained as the return value when the queue is created using + * the xQueueCreate() API function. 
+ * + * @param pvBuffer The buffer into which the received item is to be copied. + * The number of bytes of each queued item is specified when the queue is + * created. This number of bytes is copied into pvBuffer. + * + * @param xTickToDelay The number of ticks that the co-routine should block + * to wait for data to become available from the queue, should data not be + * available immediately. The actual amount of time this equates to is defined + * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant + * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see the + * crQUEUE_SEND example). + * + * @param pxResult The variable pointed to by pxResult will be set to pdPASS if + * data was successfully retrieved from the queue, otherwise it will be set to + * an error code as defined within ProjDefs.h. + * + * Example usage: + * @code{c} + * // A co-routine receives the number of an LED to flash from a queue. It + * // blocks on the queue until the number is received. + * static void prvCoRoutineFlashWorkTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * static BaseType_t xResult; + * static UBaseType_t uxLEDToFlash; + * + * // All co-routines must start with a call to crSTART(). + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Wait for data to become available on the queue. + * crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxLEDToFlash, portMAX_DELAY, &xResult ); + * + * if( xResult == pdPASS ) + * { + * // We received the LED to flash - flash it! + * vParTestToggleLED( uxLEDToFlash ); + * } + * } + * + * crEND(); + * } + * @endcode + * \defgroup crQUEUE_RECEIVE crQUEUE_RECEIVE + * \ingroup Tasks + */ +#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \ + do { \ + *( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), ( xTicksToWait ) ); \ + if( *( pxResult ) == errQUEUE_BLOCKED ) \ + { \ + crSET_STATE0( ( xHandle ) ); \ + *( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), 0 ); \ + } \ + if( *( pxResult ) == errQUEUE_YIELD ) \ + { \ + crSET_STATE1( ( xHandle ) ); \ + *( pxResult ) = pdPASS; \ + } \ + } while( 0 ) + +/** + * croutine. h + * @code{c} + * crQUEUE_SEND_FROM_ISR( + * QueueHandle_t pxQueue, + * void *pvItemToQueue, + * BaseType_t xCoRoutinePreviouslyWoken + * ) + * @endcode + * + * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the + * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR() + * functions used by tasks. + * + * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to + * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and + * xQueueReceiveFromISR() can only be used to pass data between a task and and + * ISR. + * + * crQUEUE_SEND_FROM_ISR can only be called from an ISR to send data to a queue + * that is being used from within a co-routine. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. 
+ * + * @param xCoRoutinePreviouslyWoken This is included so an ISR can post onto + * the same queue multiple times from a single interrupt. The first call + * should always pass in pdFALSE. Subsequent calls should pass in + * the value returned from the previous call. + * + * @return pdTRUE if a co-routine was woken by posting onto the queue. This is + * used by the ISR to determine if a context switch may be required following + * the ISR. + * + * Example usage: + * @code{c} + * // A co-routine that blocks on a queue waiting for characters to be received. + * static void vReceivingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * char cRxedChar; + * BaseType_t xResult; + * + * // All co-routines must start with a call to crSTART(). + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Wait for data to become available on the queue. This assumes the + * // queue xCommsRxQueue has already been created! + * crQUEUE_RECEIVE( xHandle, xCommsRxQueue, &uxLEDToFlash, portMAX_DELAY, &xResult ); + * + * // Was a character received? + * if( xResult == pdPASS ) + * { + * // Process the character here. + * } + * } + * + * // All co-routines must end with a call to crEND(). + * crEND(); + * } + * + * // An ISR that uses a queue to send characters received on a serial port to + * // a co-routine. + * void vUART_ISR( void ) + * { + * char cRxedChar; + * BaseType_t xCRWokenByPost = pdFALSE; + * + * // We loop around reading characters until there are none left in the UART. + * while( UART_RX_REG_NOT_EMPTY() ) + * { + * // Obtain the character from the UART. + * cRxedChar = UART_RX_REG; + * + * // Post the character onto a queue. xCRWokenByPost will be pdFALSE + * // the first time around the loop. If the post causes a co-routine + * // to be woken (unblocked) then xCRWokenByPost will be set to pdTRUE. + * // In this manner we can ensure that if more than one co-routine is + * // blocked on the queue only one is woken by this ISR no matter how + * // many characters are posted to the queue. + * xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost ); + * } + * } + * @endcode + * \defgroup crQUEUE_SEND_FROM_ISR crQUEUE_SEND_FROM_ISR + * \ingroup Tasks + */ +#define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) \ + xQueueCRSendFromISR( ( pxQueue ), ( pvItemToQueue ), ( xCoRoutinePreviouslyWoken ) ) + + +/** + * croutine. h + * @code{c} + * crQUEUE_SEND_FROM_ISR( + * QueueHandle_t pxQueue, + * void *pvBuffer, + * BaseType_t * pxCoRoutineWoken + * ) + * @endcode + * + * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the + * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR() + * functions used by tasks. + * + * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to + * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and + * xQueueReceiveFromISR() can only be used to pass data between a task and and + * ISR. + * + * crQUEUE_RECEIVE_FROM_ISR can only be called from an ISR to receive data + * from a queue that is being used from within a co-routine (a co-routine + * posted to the queue). + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvBuffer A pointer to a buffer into which the received item will be + * placed. 
The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from the queue into + * pvBuffer. + * + * @param pxCoRoutineWoken A co-routine may be blocked waiting for space to become + * available on the queue. If crQUEUE_RECEIVE_FROM_ISR causes such a + * co-routine to unblock *pxCoRoutineWoken will get set to pdTRUE, otherwise + * *pxCoRoutineWoken will remain unchanged. + * + * @return pdTRUE an item was successfully received from the queue, otherwise + * pdFALSE. + * + * Example usage: + * @code{c} + * // A co-routine that posts a character to a queue then blocks for a fixed + * // period. The character is incremented each time. + * static void vSendingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // cChar holds its value while this co-routine is blocked and must therefore + * // be declared static. + * static char cCharToTx = 'a'; + * BaseType_t xResult; + * + * // All co-routines must start with a call to crSTART(). + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Send the next character to the queue. + * crQUEUE_SEND( xHandle, xCoRoutineQueue, &cCharToTx, NO_DELAY, &xResult ); + * + * if( xResult == pdPASS ) + * { + * // The character was successfully posted to the queue. + * } + * else + * { + * // Could not post the character to the queue. + * } + * + * // Enable the UART Tx interrupt to cause an interrupt in this + * // hypothetical UART. The interrupt will obtain the character + * // from the queue and send it. + * ENABLE_RX_INTERRUPT(); + * + * // Increment to the next character then block for a fixed period. + * // cCharToTx will maintain its value across the delay as it is + * // declared static. + * cCharToTx++; + * if( cCharToTx > 'x' ) + * { + * cCharToTx = 'a'; + * } + * crDELAY( 100 ); + * } + * + * // All co-routines must end with a call to crEND(). + * crEND(); + * } + * + * // An ISR that uses a queue to receive characters to send on a UART. + * void vUART_ISR( void ) + * { + * char cCharToTx; + * BaseType_t xCRWokenByPost = pdFALSE; + * + * while( UART_TX_REG_EMPTY() ) + * { + * // Are there any characters in the queue waiting to be sent? + * // xCRWokenByPost will automatically be set to pdTRUE if a co-routine + * // is woken by the post - ensuring that only a single co-routine is + * // woken no matter how many times we go around this loop. + * if( crQUEUE_RECEIVE_FROM_ISR( pxQueue, &cCharToTx, &xCRWokenByPost ) ) + * { + * SEND_CHARACTER( cCharToTx ); + * } + * } + * } + * @endcode + * \defgroup crQUEUE_RECEIVE_FROM_ISR crQUEUE_RECEIVE_FROM_ISR + * \ingroup Tasks + */ +#define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) \ + xQueueCRReceiveFromISR( ( pxQueue ), ( pvBuffer ), ( pxCoRoutineWoken ) ) + +/* + * This function is intended for internal use by the co-routine macros only. + * The macro nature of the co-routine implementation requires that the + * prototype appears here. The function should not be used by application + * writers. + * + * Removes the current co-routine from its ready list and places it in the + * appropriate delayed list. + */ +void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, + List_t * pxEventList ); + +/* + * This function is intended for internal use by the queue implementation only. + * The function should not be used by application writers. + * + * Removes the highest priority co-routine from the event list and places it in + * the pending ready list. 
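+ *
+ * Returns pdTRUE if the unblocked co-routine has a priority greater than or
+ * equal to the priority of the currently executing co-routine, otherwise
+ * pdFALSE.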
+ */ +BaseType_t xCoRoutineRemoveFromEventList( const List_t * pxEventList ); + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + +#endif /* CO_ROUTINE_H */ diff --git a/include/mpu_prototypes.h b/include/mpu_prototypes.h index 08fa0519317..633efd4a817 100644 --- a/include/mpu_prototypes.h +++ b/include/mpu_prototypes.h @@ -39,20 +39,6 @@ #define MPU_PROTOTYPES_H /* MPU versions of task.h API functions. */ -BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, - const char * const pcName, - const uint16_t usStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL; -TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, - const char * const pcName, - const uint32_t ulStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - StackType_t * const puxStackBuffer, - StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL; void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL; @@ -63,17 +49,11 @@ void MPU_vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t * pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskPrioritySet( TaskHandle_t xTask, - UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL; void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL; void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskStartScheduler( void ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL; TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL; char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL; -TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, @@ -84,16 +64,14 @@ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, void * pvValue ) FREERTOS_SYSTEM_CALL; void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, - void * pvParameter ) FREERTOS_SYSTEM_CALL; TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL; +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL; configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskGetRunTimeStats( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTaskGenericNotify( TaskHandle_t 
xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, @@ -112,14 +90,55 @@ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, UBaseType_t uxIndexToClear, uint32_t ulBitsToClear ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTaskIncrementTick( void ) FREERTOS_SYSTEM_CALL; -TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskMissedYield( void ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) FREERTOS_SYSTEM_CALL; + +/* Privileged only wrappers for Task APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint16_t usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; +void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION; +void MPU_vTaskPrioritySet( TaskHandle_t xTask, + UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION; +TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, + void * pvParameter ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, + const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskGetStaticBuffers( TaskHandle_t xTask, + StackType_t ** ppuxStackBuffer, + StaticTask_t ** ppxTaskBuffer ) PRIVILEGED_FUNCTION; +UBaseType_t MPU_uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; +TaskHookFunction_t MPU_xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +void MPU_vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; /* MPU versions of queue.h API functions. 
*/ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, @@ -136,15 +155,6 @@ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; -void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, - StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, - const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, - const UBaseType_t uxInitialCount, - StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL; TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; @@ -153,65 +163,97 @@ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char * pcName ) FREERTOS_SYSTEM_CALL; void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, - const UBaseType_t uxItemSize, - const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, - const UBaseType_t uxItemSize, - uint8_t * pucQueueStorage, - StaticQueue_t * pxStaticQueue, - const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; -QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, - QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, - BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL; void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +/* Privileged only wrappers for Queue APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
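+ * These wrappers are declared PRIVILEGED_FUNCTION rather than
+ * FREERTOS_SYSTEM_CALL, so they are not reachable by unprivileged tasks
+ * through the system call mechanism.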
*/ +void MPU_vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, + StaticQueue_t * pxStaticQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, + const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, + const UBaseType_t uxInitialCount, + StaticQueue_t * pxStaticQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + uint8_t * pucQueueStorage, + StaticQueue_t * pxStaticQueue, + const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, + BaseType_t xNewQueue ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueGenericGetStaticBuffers( QueueHandle_t xQueue, + uint8_t ** ppucQueueStorage, + StaticQueue_t ** ppxStaticQueue ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueGenericSendFromISR( QueueHandle_t xQueue, + const void * const pvItemToQueue, + BaseType_t * const pxHigherPriorityTaskWoken, + const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueGiveFromISR( QueueHandle_t xQueue, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueuePeekFromISR( QueueHandle_t xQueue, + void * const pvBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueReceiveFromISR( QueueHandle_t xQueue, + void * const pvBuffer, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +UBaseType_t MPU_uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +TaskHandle_t MPU_xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; +QueueSetMemberHandle_t MPU_xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + /* MPU versions of timers.h API functions. 
*/ -TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, - const TickType_t xTimerPeriodInTicks, - const UBaseType_t uxAutoReload, - void * const pvTimerID, - TimerCallbackFunction_t pxCallbackFunction ) FREERTOS_SYSTEM_CALL; -TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, - const TickType_t xTimerPeriodInTicks, - const UBaseType_t uxAutoReload, - void * const pvTimerID, - TimerCallbackFunction_t pxCallbackFunction, - StaticTimer_t * pxTimerBuffer ) FREERTOS_SYSTEM_CALL; void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void * pvNewID ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, - void * pvParameter1, - uint32_t ulParameter2, - TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTimerCreateTimerTask( void ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, - const BaseType_t xCommandID, - const TickType_t xOptionalValue, - BaseType_t * const pxHigherPriorityTaskWoken, - const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +/* Privileged only wrappers for Timer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) PRIVILEGED_FUNCTION; +TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t * pxTimerBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTimerGetStaticBuffer( TimerHandle_t xTimer, + StaticTimer_t ** ppxTimerBuffer ) PRIVILEGED_FUNCTION; /* MPU versions of event_group.h API functions. 
*/ -EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL; -EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL; EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, @@ -225,8 +267,26 @@ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; -void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL; -UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL; +#if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL; + void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) FREERTOS_SYSTEM_CALL; +#endif /* ( configUSE_TRACE_FACILITY == 1 )*/ + +/* Privileged only wrappers for Event Group APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +EventGroupHandle_t MPU_xEventGroupCreate( void ) PRIVILEGED_FUNCTION; +EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) PRIVILEGED_FUNCTION; +void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + StaticEventGroup_t ** ppxEventGroupBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +EventBits_t MPU_xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; /* MPU versions of message/stream_buffer.h API functions. */ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, @@ -237,28 +297,45 @@ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, void * pvRxData, size_t xBufferLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; -size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; -void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +/* Privileged only wrappers for Stream Buffer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
*/ StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, - StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL; + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, StaticStreamBuffer_t * const pxStaticStreamBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, - StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL; - - + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; +void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffers, + uint8_t * ppucStreamBufferStorageArea, + StaticStreamBuffer_t * ppxStaticStreamBuffer ) PRIVILEGED_FUNCTION; +size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; #endif /* MPU_PROTOTYPES_H */ diff --git a/include/mpu_wrappers.h b/include/mpu_wrappers.h index fb8aedfee81..020efc3ef68 100644 --- a/include/mpu_wrappers.h +++ b/include/mpu_wrappers.h @@ -47,114 +47,184 @@ */ /* Map standard task.h API functions to the MPU equivalents. 
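+ * These mappings are only in effect when portUSING_MPU_WRAPPERS is 1 and
+ * MPU_WRAPPERS_INCLUDED_FROM_API_FILE is not defined, so application calls
+ * such as vTaskDelay() resolve to the MPU_ prefixed equivalents declared in
+ * mpu_prototypes.h.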
*/ - #define xTaskCreate MPU_xTaskCreate - #define xTaskCreateStatic MPU_xTaskCreateStatic - #define vTaskDelete MPU_vTaskDelete - #define vTaskDelay MPU_vTaskDelay - #define xTaskDelayUntil MPU_xTaskDelayUntil - #define xTaskAbortDelay MPU_xTaskAbortDelay - #define uxTaskPriorityGet MPU_uxTaskPriorityGet - #define eTaskGetState MPU_eTaskGetState - #define vTaskGetInfo MPU_vTaskGetInfo - #define vTaskPrioritySet MPU_vTaskPrioritySet - #define vTaskSuspend MPU_vTaskSuspend - #define vTaskResume MPU_vTaskResume - #define vTaskSuspendAll MPU_vTaskSuspendAll - #define xTaskResumeAll MPU_xTaskResumeAll - #define xTaskGetTickCount MPU_xTaskGetTickCount - #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks - #define pcTaskGetName MPU_pcTaskGetName - #define xTaskGetHandle MPU_xTaskGetHandle - #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark - #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2 - #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag - #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag - #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer - #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer - #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook - #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle - #define uxTaskGetSystemState MPU_uxTaskGetSystemState - #define vTaskList MPU_vTaskList - #define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats - #define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter - #define ulTaskGetIdleRunTimePercent MPU_ulTaskGetIdleRunTimePercent - #define xTaskGenericNotify MPU_xTaskGenericNotify - #define xTaskGenericNotifyWait MPU_xTaskGenericNotifyWait - #define ulTaskGenericNotifyTake MPU_ulTaskGenericNotifyTake - #define xTaskGenericNotifyStateClear MPU_xTaskGenericNotifyStateClear - #define ulTaskGenericNotifyValueClear MPU_ulTaskGenericNotifyValueClear - #define xTaskCatchUpTicks MPU_xTaskCatchUpTicks - - #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle - #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState - #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut - #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState + #define vTaskDelay MPU_vTaskDelay + #define xTaskDelayUntil MPU_xTaskDelayUntil + #define xTaskAbortDelay MPU_xTaskAbortDelay + #define uxTaskPriorityGet MPU_uxTaskPriorityGet + #define eTaskGetState MPU_eTaskGetState + #define vTaskGetInfo MPU_vTaskGetInfo + #define vTaskSuspend MPU_vTaskSuspend + #define vTaskResume MPU_vTaskResume + #define xTaskGetTickCount MPU_xTaskGetTickCount + #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks + #define pcTaskGetName MPU_pcTaskGetName + #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark + #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2 + #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag + #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag + #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer + #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer + #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle + #define uxTaskGetSystemState MPU_uxTaskGetSystemState + #define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter + #define ulTaskGetIdleRunTimePercent MPU_ulTaskGetIdleRunTimePercent + #define xTaskGenericNotify MPU_xTaskGenericNotify + #define xTaskGenericNotifyWait 
MPU_xTaskGenericNotifyWait + #define ulTaskGenericNotifyTake MPU_ulTaskGenericNotifyTake + #define xTaskGenericNotifyStateClear MPU_xTaskGenericNotifyStateClear + #define ulTaskGenericNotifyValueClear MPU_ulTaskGenericNotifyValueClear + #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState + #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut + #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle + #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define ulTaskGetRunTimeCounter MPU_ulTaskGetRunTimeCounter + #define ulTaskGetRunTimePercent MPU_ulTaskGetRunTimePercent + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/* Privileged only wrappers for Task APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ + #define xTaskCreate MPU_xTaskCreate + #define xTaskCreateStatic MPU_xTaskCreateStatic + #define vTaskDelete MPU_vTaskDelete + #define vTaskPrioritySet MPU_vTaskPrioritySet + #define xTaskGetHandle MPU_xTaskGetHandle + #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xTaskCreateRestricted MPU_xTaskCreateRestricted + #define xTaskCreateRestrictedStatic MPU_xTaskCreateRestrictedStatic + #define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions + #define xTaskGetStaticBuffers MPU_xTaskGetStaticBuffers + #define uxTaskPriorityGetFromISR MPU_uxTaskPriorityGetFromISR + #define xTaskResumeFromISR MPU_xTaskResumeFromISR + #define xTaskGetApplicationTaskTagFromISR MPU_xTaskGetApplicationTaskTagFromISR + #define xTaskGenericNotifyFromISR MPU_xTaskGenericNotifyFromISR + #define vTaskGenericNotifyGiveFromISR MPU_vTaskGenericNotifyGiveFromISR + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Map standard queue.h API functions to the MPU equivalents. */ - #define xQueueGenericSend MPU_xQueueGenericSend - #define xQueueReceive MPU_xQueueReceive - #define xQueuePeek MPU_xQueuePeek - #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake - #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting - #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable + #define xQueueGenericSend MPU_xQueueGenericSend + #define xQueueReceive MPU_xQueueReceive + #define xQueuePeek MPU_xQueuePeek + #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake + #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting + #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable + #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder + #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive + #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive + #define xQueueAddToSet MPU_xQueueAddToSet + #define xQueueSelectFromSet MPU_xQueueSelectFromSet + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + #define vQueueAddToRegistry MPU_vQueueAddToRegistry + #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue + #define pcQueueGetName MPU_pcQueueGetName + #endif /* #if ( configQUEUE_REGISTRY_SIZE > 0 ) */ + +/* Privileged only wrappers for Queue APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
*/ #define vQueueDelete MPU_vQueueDelete #define xQueueCreateMutex MPU_xQueueCreateMutex #define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic #define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore #define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic - #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder - #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive - #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive #define xQueueGenericCreate MPU_xQueueGenericCreate #define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic + #define xQueueGenericReset MPU_xQueueGenericReset #define xQueueCreateSet MPU_xQueueCreateSet - #define xQueueAddToSet MPU_xQueueAddToSet #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet - #define xQueueSelectFromSet MPU_xQueueSelectFromSet - #define xQueueGenericReset MPU_xQueueGenericReset - #if ( configQUEUE_REGISTRY_SIZE > 0 ) - #define vQueueAddToRegistry MPU_vQueueAddToRegistry - #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue - #define pcQueueGetName MPU_pcQueueGetName - #endif + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xQueueGenericGetStaticBuffers MPU_xQueueGenericGetStaticBuffers + #define xQueueGenericSendFromISR MPU_xQueueGenericSendFromISR + #define xQueueGiveFromISR MPU_xQueueGiveFromISR + #define xQueuePeekFromISR MPU_xQueuePeekFromISR + #define xQueueReceiveFromISR MPU_xQueueReceiveFromISR + #define xQueueIsQueueEmptyFromISR MPU_xQueueIsQueueEmptyFromISR + #define xQueueIsQueueFullFromISR MPU_xQueueIsQueueFullFromISR + #define uxQueueMessagesWaitingFromISR MPU_uxQueueMessagesWaitingFromISR + #define xQueueGetMutexHolderFromISR MPU_xQueueGetMutexHolderFromISR + #define xQueueSelectFromSetFromISR MPU_xQueueSelectFromSetFromISR + #endif /* if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Map standard timer.h API functions to the MPU equivalents. */ - #define pvTimerGetTimerID MPU_pvTimerGetTimerID - #define vTimerSetTimerID MPU_vTimerSetTimerID - #define xTimerIsTimerActive MPU_xTimerIsTimerActive - #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle - #define pcTimerGetName MPU_pcTimerGetName - #define vTimerSetReloadMode MPU_vTimerSetReloadMode - #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode - #define xTimerGetPeriod MPU_xTimerGetPeriod - #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime - #define xTimerGenericCommand MPU_xTimerGenericCommand + #define pvTimerGetTimerID MPU_pvTimerGetTimerID + #define vTimerSetTimerID MPU_vTimerSetTimerID + #define xTimerIsTimerActive MPU_xTimerIsTimerActive + #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle + #define xTimerGenericCommand MPU_xTimerGenericCommand + #define pcTimerGetName MPU_pcTimerGetName + #define vTimerSetReloadMode MPU_vTimerSetReloadMode + #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode + #define xTimerGetPeriod MPU_xTimerGetPeriod + #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime + +/* Privileged only wrappers for Timer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xTimerGetReloadMode MPU_xTimerGetReloadMode + #define xTimerCreate MPU_xTimerCreate + #define xTimerCreateStatic MPU_xTimerCreateStatic + #define xTimerGetStaticBuffer MPU_xTimerGetStaticBuffer + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Map standard event_group.h API functions to the MPU equivalents. 
*/ - #define xEventGroupCreate MPU_xEventGroupCreate - #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic - #define xEventGroupWaitBits MPU_xEventGroupWaitBits - #define xEventGroupClearBits MPU_xEventGroupClearBits - #define xEventGroupSetBits MPU_xEventGroupSetBits - #define xEventGroupSync MPU_xEventGroupSync - #define vEventGroupDelete MPU_vEventGroupDelete + #define xEventGroupWaitBits MPU_xEventGroupWaitBits + #define xEventGroupClearBits MPU_xEventGroupClearBits + #define xEventGroupSetBits MPU_xEventGroupSetBits + #define xEventGroupSync MPU_xEventGroupSync + + #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + #define uxEventGroupGetNumber MPU_uxEventGroupGetNumber + #define vEventGroupSetNumber MPU_vEventGroupSetNumber + #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */ + +/* Privileged only wrappers for Event Group APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ + #define xEventGroupCreate MPU_xEventGroupCreate + #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic + #define vEventGroupDelete MPU_vEventGroupDelete + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xEventGroupGetStaticBuffer MPU_xEventGroupGetStaticBuffer + #define xEventGroupClearBitsFromISR MPU_xEventGroupClearBitsFromISR + #define xEventGroupSetBitsFromISR MPU_xEventGroupSetBitsFromISR + #define xEventGroupGetBitsFromISR MPU_xEventGroupGetBitsFromISR + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Map standard message/stream_buffer.h API functions to the MPU * equivalents. */ #define xStreamBufferSend MPU_xStreamBufferSend #define xStreamBufferReceive MPU_xStreamBufferReceive - #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes - #define vStreamBufferDelete MPU_vStreamBufferDelete #define xStreamBufferIsFull MPU_xStreamBufferIsFull #define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty - #define xStreamBufferReset MPU_xStreamBufferReset #define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable #define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable #define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel - #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate - #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic + #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes +/* Privileged only wrappers for Stream Buffer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
*/ + + #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate + #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic + #define vStreamBufferDelete MPU_vStreamBufferDelete + #define xStreamBufferReset MPU_xStreamBufferReset + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xStreamBufferGetStaticBuffers MPU_xStreamBufferGetStaticBuffers + #define xStreamBufferSendFromISR MPU_xStreamBufferSendFromISR + #define xStreamBufferReceiveFromISR MPU_xStreamBufferReceiveFromISR + #define xStreamBufferSendCompletedFromISR MPU_xStreamBufferSendCompletedFromISR + #define xStreamBufferReceiveCompletedFromISR MPU_xStreamBufferReceiveCompletedFromISR + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Remove the privileged function macro, but keep the PRIVILEGED_DATA * macro so applications can place data in privileged access sections diff --git a/include/portable.h b/include/portable.h index 52d5434fe93..5734eb72037 100644 --- a/include/portable.h +++ b/include/portable.h @@ -110,13 +110,15 @@ StackType_t * pxEndOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) PRIVILEGED_FUNCTION; #else StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; - #endif + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) PRIVILEGED_FUNCTION; + #endif /* if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) */ #else /* if ( portUSING_MPU_WRAPPERS == 1 ) */ #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -229,6 +231,22 @@ void vPortEndScheduler( void ) PRIVILEGED_FUNCTION; uint32_t ulStackDepth ) PRIVILEGED_FUNCTION; #endif +/** + * @brief Checks if the calling task is authorized to access the given buffer. + * + * @param pvBuffer The buffer which the calling task wants to access. + * @param ulBufferLength The length of the pvBuffer. + * @param ulAccessRequested The permissions that the calling task wants. + * + * @return pdTRUE if the calling task is authorized to access the buffer, + * pdFALSE otherwise. + */ +#if ( portUSING_MPU_WRAPPERS == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) PRIVILEGED_FUNCTION; +#endif + /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/include/queue.h b/include/queue.h index 66c8286aef0..1c1b9822afd 100644 --- a/include/queue.h +++ b/include/queue.h @@ -1455,6 +1455,28 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FU BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +/* + * The functions defined above are for passing data to and from tasks. The + * functions below are the equivalents for passing data to and from + * co-routines. + * + * These functions are called from the co-routine macro implementation and + * should not be called directly from application code. Instead use the macro + * wrappers defined within croutine.h. 
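+ * For example, crQUEUE_SEND() and crQUEUE_RECEIVE() wrap xQueueCRSend() and
+ * xQueueCRReceive() respectively.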
+ */ +BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, + const void * pvItemToQueue, + BaseType_t xCoRoutinePreviouslyWoken ); +BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, + void * pvBuffer, + BaseType_t * pxTaskWoken ); +BaseType_t xQueueCRSend( QueueHandle_t xQueue, + const void * pvItemToQueue, + TickType_t xTicksToWait ); +BaseType_t xQueueCRReceive( QueueHandle_t xQueue, + void * pvBuffer, + TickType_t xTicksToWait ); + /* * For internal use only. Use xSemaphoreCreateMutex(), * xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling @@ -1730,7 +1752,7 @@ void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION; UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; - +UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; /* *INDENT-OFF* */ #ifdef __cplusplus diff --git a/include/semphr.h b/include/semphr.h index 7977a01b84e..e93b708c51c 100644 --- a/include/semphr.h +++ b/include/semphr.h @@ -95,13 +95,13 @@ typedef QueueHandle_t SemaphoreHandle_t; */ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) #define vSemaphoreCreateBinary( xSemaphore ) \ - { \ + do { \ ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \ if( ( xSemaphore ) != NULL ) \ { \ ( void ) xSemaphoreGive( ( xSemaphore ) ); \ } \ - } + } while( 0 ) #endif /** @@ -1193,7 +1193,8 @@ typedef QueueHandle_t SemaphoreHandle_t; /** * semphr.h * @code{c} - * BaseType_t xSemaphoreGetStaticBuffer( SemaphoreHandle_t xSemaphore ); + * BaseType_t xSemaphoreGetStaticBuffer( SemaphoreHandle_t xSemaphore, + * StaticSemaphore_t ** ppxSemaphoreBuffer ); * @endcode * * Retrieve pointer to a statically created binary semaphore, counting semaphore, diff --git a/include/stack_macros.h b/include/stack_macros.h index 7ffc7b34338..7ead99ffd0f 100644 --- a/include/stack_macros.h +++ b/include/stack_macros.h @@ -57,13 +57,13 @@ /* Only the current stack state is to be checked. */ #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ /* Is the currently saved stack pointer within the stack limit? */ \ if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack + portSTACK_LIMIT_PADDING ) \ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while( 0 ) #endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ /*-----------------------------------------------------------*/ @@ -72,14 +72,14 @@ /* Only the current stack state is to be checked. */ #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ \ /* Is the currently saved stack pointer within the stack limit? 
*/ \ if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack - portSTACK_LIMIT_PADDING ) \ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while( 0 ) #endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ /*-----------------------------------------------------------*/ @@ -106,7 +106,7 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ int8_t * pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ @@ -122,7 +122,7 @@ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while( 0 ) #endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ /*-----------------------------------------------------------*/ diff --git a/include/task.h b/include/task.h index 52d5d40602b..a7e1c795e31 100644 --- a/include/task.h +++ b/include/task.h @@ -66,6 +66,11 @@ #define tskMPU_REGION_NORMAL_MEMORY ( 1UL << 3UL ) #define tskMPU_REGION_DEVICE_MEMORY ( 1UL << 4UL ) +/* MPU region permissions stored in MPU settings to + * authorize access requests. */ +#define tskMPU_READ_PERMISSION ( 1UL << 0UL ) +#define tskMPU_WRITE_PERMISSION ( 1UL << 1UL ) + /* The direct to task notification feature used to have only a single notification * per task. Now there is an array of notifications per task that is dimensioned by * configTASK_NOTIFICATION_ARRAY_ENTRIES. For backward compatibility, any use of the @@ -3445,7 +3450,7 @@ void vTaskExitCritical( void ); * should be used in the implementation of portENTER_CRITICAL_FROM_ISR if port is * running a multiple core FreeRTOS. */ -portBASE_TYPE vTaskEnterCriticalFromISR( void ); +UBaseType_t vTaskEnterCriticalFromISR( void ); /* * This function is only intended for use when implementing a port of the scheduler @@ -3453,7 +3458,16 @@ portBASE_TYPE vTaskEnterCriticalFromISR( void ); * should be used in the implementation of portEXIT_CRITICAL_FROM_ISR if port is * running a multiple core FreeRTOS. */ -void vTaskExitCriticalFromISR( portBASE_TYPE xSavedInterruptStatus ); +void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); + +#if ( portUSING_MPU_WRAPPERS == 1 ) + +/* + * For internal use only. Get MPU settings associated with a task. 
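+ * Passing NULL as xTask returns the MPU settings of the calling task; this is
+ * how the ARMv8-M port uses it in xPortIsTaskPrivileged() and
+ * xPortIsAuthorizedToAccessBuffer().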
+ */ + xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +#endif /* portUSING_MPU_WRAPPERS */ /* *INDENT-OFF* */ #ifdef __cplusplus diff --git a/portable/ARMv8M/copy_files.py b/portable/ARMv8M/copy_files.py index d064969809c..3609c67ef33 100644 --- a/portable/ARMv8M/copy_files.py +++ b/portable/ARMv8M/copy_files.py @@ -73,16 +73,22 @@ 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33')], 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ')], 'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], 'ARM_CM85' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM85', 'portmacro.h')], 'ARM_CM85_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM85', 'portmacro.h')] }, 'IAR':{ @@ -91,16 +97,22 @@ 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33')], 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ')], 'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')], 'ARM_CM85' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM85', 'portmacro.h')], 'ARM_CM85_NTZ' : [os.path.join('non_secure', 'portable', 
'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM85', 'portmacro.h')] }, } diff --git a/portable/ARMv8M/non_secure/port.c b/portable/ARMv8M/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/ARMv8M/non_secure/port.c +++ b/portable/ARMv8M/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. 
*/ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
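+     *
+     * A minimal usage sketch; prvDoPrivilegedOnlyWork() is a hypothetical
+     * application helper shown only for illustration:
+     *
+     * @code{c}
+     * if( xPortIsTaskPrivileged() == pdTRUE )
+     * {
+     *     prvDoPrivilegedOnlyWork();
+     * }
+     * @endcode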
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
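+         * The hardware saved frame is, from low to high address, R0-R3, R12,
+         * LR, PC and xPSR ( ulStackFrameSize of 8 words ), followed by the
+         * FPU registers when an extended frame is in use ( ulStackFrameSize
+         * of 26 words ).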
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
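+         * A non-NULL value here would mean that a system call was entered
+         * while a previous one is still in progress, which is what the
+         * configASSERT() below is intended to catch.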
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
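+         * pulSystemCallStack starts at the last word of ulSystemCallStackBuffer
+         * and is rounded down to a portBYTE_ALIGNMENT boundary, while
+         * pulSystemCallStackLimit starts at the first word of the buffer and
+         * is rounded up, so both ends of the usable stack are aligned.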
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..a1e5ce0828f --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c @@ -0,0 +1,2419 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 
\n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " 
tst r0, r1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push 
{r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, 
control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : 
"memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + 
); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( 
portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const 
TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( 
portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " 
MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ 
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0, r1} 
\n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t 
xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( 
portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs 
r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t 
MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c index 44f159af1fa..64a24f527ff 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c @@ -44,6 +44,109 @@ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0. #endif +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. 
*/ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " subs r2, #20 \n" + " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + " subs r2, #20 \n" + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " mov lr, r6 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r2, #48 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r2, #16 \n" + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -54,83 +157,24 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. 
*/ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " movs r5, #4 \n"/* r5 = 4. */ - " str r5, [r2] \n"/* Program RNR = 4. */ - " ldmia r3!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r5, #5 \n"/* r5 = 5. */ - " str r5, [r2] \n"/* Program RNR = 5. */ - " ldmia r3!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r5, #6 \n"/* r5 = 6. */ - " str r5, [r2] \n"/* Program RNR = 6. */ - " ldmia r3!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r5, #7 \n"/* r5 = 7. */ - " str r5, [r2] \n"/* Program RNR = 7. */ - " ldmia r3!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " bx r3 \n"/* Finally, branch to EXC_RETURN. 
*/ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -237,6 +281,167 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r3} \n" /* LR is now in r3. */ + " mov lr, r3 \n" /* Restore LR. */ + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " stmia r2!, {r4-r7} \n" /* Store r4-r7. */ + " mov r4, r8 \n" /* r4 = r8. */ + " mov r5, r9 \n" /* r5 = r9. */ + " mov r6, r10 \n" /* r6 = r10. */ + " mov r7, r11 \n" /* r7 = r11. */ + " stmia r2!, {r4-r7} \n" /* Store r8-r11. */ + " ldmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */ + " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */ + " ldmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */ + " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " mov r6, lr \n" /* r6 = LR. */ + " stmia r2!, {r0, r3-r6} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. 
*/ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " subs r2, #20 \n" + " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + " subs r2, #20 \n" + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " mov lr, r6 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r4} \n" /* LR is now in r4. */ + " mov lr, r4 \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r2, #48 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r2, #16 \n" + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,52 +465,26 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stmia r2!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. 
*/ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #48 \n"/* r2 = r2 - 48. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */ + " mov r4, r8 \n"/* r4 = r8. */ + " mov r5, r9 \n"/* r5 = r9. */ + " mov r6, r10 \n"/* r6 = r10. */ + " mov r7, r11 \n"/* r7 = r11. */ + " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ " \n" " select_next_task: \n" " cpsid i \n" @@ -316,85 +495,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r4, xRNRConst \n"/* r4 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r5, #4 \n"/* r5 = 4. */ - " str r5, [r4] \n"/* Program RNR = 4. */ - " ldmia r1!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r5, #5 \n"/* r5 = 5. */ - " str r5, [r4] \n"/* Program RNR = 5. */ - " ldmia r1!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r5, #6 \n"/* r5 = 6. */ - " str r5, [r4] \n"/* Program RNR = 6. */ - " ldmia r1!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. 
*/ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r5, #7 \n"/* r5 = 7. */ - " str r5, [r4] \n"/* Program RNR = 7. */ - " ldmia r1!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. 
*/ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " adds r2, r2, #16 \n"/* Move to the high registers. */ @@ -411,16 +527,62 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "movs r0, #4 \n" + "mov r1, lr \n" + "tst r0, r1 \n" + "beq stack_on_msp \n" + "stack_on_psp: \n" + " mrs r0, psp \n" + " b route_svc \n" + "stack_on_msp: \n" + " mrs r0, msp \n" + " b route_svc \n" + " \n" + "route_svc: \n" + " ldr r2, [r0, #24] \n" + " subs r2, #2 \n" + " ldrb r3, [r2, #0] \n" + " cmp r3, %0 \n" + " beq system_call_enter \n" + " cmp r3, %1 \n" + " beq system_call_enter_1 \n" + " cmp r3, %2 \n" + " beq system_call_exit \n" + " b vPortSVCHandler_C \n" + " \n" + "system_call_enter: \n" + " b vSystemCallEnter \n" + "system_call_enter_1: \n" + " b vSystemCallEnter_1 \n" + "system_call_exit: \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "r3", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -443,6 +605,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index 746f734b8ac..5fd94c1c371 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M23" #define portHAS_BASEPRI 0 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..a1e5ce0828f --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c @@ -0,0 +1,2419 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, 
control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) 
__attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr 
\n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t 
MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( 
TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, 
#1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : 
"memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " 
.syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0, r1} 
\n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t 
MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl 
MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c index 7fb7b5ade5d..b11b6e97c5e 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c @@ -44,6 +44,106 @@ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0. #endif +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. 
*/ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " subs r1, #16 \n" + " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + " subs r1, #16 \n" + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " mov lr, r5 \n" + " \n" + " restore_general_regs_first_task: \n" + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r1, #48 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r1, #16 \n" + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -54,78 +154,21 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r4, #5 \n"/* r4 = 5. */ - " str r4, [r2] \n"/* Program RNR = 5. */ - " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r4, #6 \n"/* r4 = 6. */ - " str r4, [r2] \n"/* Program RNR = 6. */ - " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. 
*/ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r4, #7 \n"/* r4 = 7. */ - " str r4, [r2] \n"/* Program RNR = 7. */ - " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -232,6 +275,136 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + " stmia r1!, {r4-r7} \n" /* Store r4-r7. */ + " mov r4, r8 \n" /* r4 = r8. */ + " mov r5, r9 \n" /* r5 = r9. */ + " mov r6, r10 \n" /* r6 = r10. */ + " mov r7, r11 \n" /* r7 = r11. */ + " stmia r1!, {r4-r7} \n" /* Store r8-r11. 
*/ + " ldmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */ + " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */ + " ldmia r2!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */ + " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r2, psp \n" /* r2 = PSP. */ + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " mov r5, lr \n" /* r5 = LR. */ + " stmia r1!, {r2-r5} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. 
*/ + " \n" + " restore_special_regs: \n" + " subs r1, #16 \n" + " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + " subs r1, #16 \n" + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " mov lr, r5 \n" + " \n" + " restore_general_regs: \n" + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r1, #48 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r1, #16 \n" + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -241,30 +414,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " mrs r0, psp \n"/* Read PSP in r0. */ " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r0, r0, #44 \n"/* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r0, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r0!, {r1-r7} \n"/* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #else /* configENABLE_MPU */ - " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */ - " str r0, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ + " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */ + " str r0, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */ + " mov r4, r8 \n"/* r4 = r8. */ + " mov r5, r9 \n"/* r5 = r9. 
*/ + " mov r6, r10 \n"/* r6 = r10. */ + " mov r7, r11 \n"/* r7 = r11. */ + " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ " \n" " cpsid i \n" " bl vTaskSwitchContext \n" @@ -274,88 +433,76 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r4, #5 \n"/* r4 = 5. */ - " str r4, [r2] \n"/* Program RNR = 5. */ - " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r4, #6 \n"/* r4 = 6. */ - " str r4, [r2] \n"/* Program RNR = 6. */ - " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r4, #7 \n"/* r4 = 7. */ - " str r4, [r2] \n"/* Program RNR = 7. */ - " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " adds r0, r0, #28 \n"/* Move to the high registers. */ - " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ - " mov r8, r4 \n"/* r8 = r4. */ - " mov r9, r5 \n"/* r9 = r5. */ - " mov r10, r6 \n"/* r10 = r6. */ - " mov r11, r7 \n"/* r11 = r7. */ - " msr psp, r0 \n"/* Remember the new top of stack for the task. */ - " subs r0, r0, #44 \n"/* Move to the starting of the saved context. */ - " ldmia r0!, {r1-r7} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. 
*/ - " bx r3 \n" - #else /* configENABLE_MPU */ - " adds r0, r0, #24 \n"/* Move to the high registers. */ - " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ - " mov r8, r4 \n"/* r8 = r4. */ - " mov r9, r5 \n"/* r9 = r5. */ - " mov r10, r6 \n"/* r10 = r6. */ - " mov r11, r7 \n"/* r11 = r7. */ - " msr psp, r0 \n"/* Remember the new top of stack for the task. */ - " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */ - " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - " bx r3 \n" - #endif /* configENABLE_MPU */ + " adds r0, r0, #24 \n"/* Move to the high registers. */ + " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ + " mov r8, r4 \n"/* r8 = r4. */ + " mov r9, r5 \n"/* r9 = r5. */ + " mov r10, r6 \n"/* r10 = r6. */ + " mov r11, r7 \n"/* r11 = r7. */ + " msr psp, r0 \n"/* Remember the new top of stack for the task. */ + " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */ + " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ + " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "movs r0, #4 \n" + "mov r1, lr \n" + "tst r0, r1 \n" + "beq stack_on_msp \n" + "stack_on_psp: \n" + " mrs r0, psp \n" + " b route_svc \n" + "stack_on_msp: \n" + " mrs r0, msp \n" + " b route_svc \n" + " \n" + "route_svc: \n" + " ldr r2, [r0, #24] \n" + " subs r2, #2 \n" + " ldrb r3, [r2, #0] \n" + " cmp r3, %0 \n" + " beq system_call_enter \n" + " cmp r3, %1 \n" + " beq system_call_enter_1 \n" + " cmp r3, %2 \n" + " beq system_call_exit \n" + " b vPortSVCHandler_C \n" + " \n" + "system_call_enter: \n" + " b vSystemCallEnter \n" + "system_call_enter_1: \n" + " b vSystemCallEnter_1 \n" + "system_call_exit: \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "r3", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -378,4 +525,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index 746f734b8ac..5fd94c1c371 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M23" #define portHAS_BASEPRI 0 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
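All of the wrappers in this file share one shape, which the following self-contained C sketch spells out. Every `example*` name below is hypothetical and exists only so the sketch compiles; the real wrappers are naked assembly so the compiler never touches the argument registers, and the `push {r0}` / `pop {r0}` pair simply preserves the first API argument while CONTROL is read into r0.

#include <stdint.h>

#define exampleSVC_SYSTEM_CALL_ENTER    100U   /* Hypothetical stand-ins for the portSVC_SYSTEM_CALL_ENTER/_EXIT numbers. */
#define exampleSVC_SYSTEM_CALL_EXIT     101U

static int exampleCallerIsPrivileged( void )        { return 1; }              /* Stands in for "mrs r0, control / tst r0, #1". */
static void exampleRaiseSvc( uint32_t ulSvcNumber ) { ( void ) ulSvcNumber; }  /* Stands in for "svc %0" / "svc %1". */
static uint32_t exampleImpl( uint32_t ulParam )     { return ulParam; }        /* Stands in for the MPU_<API>Impl function. */

uint32_t exampleSystemCallWrapper( uint32_t ulParam )
{
    uint32_t ulReturn;

    if( exampleCallerIsPrivileged() != 0 )
    {
        /* Privileged callers branch straight to the implementation
         * ("b MPU_<API>Impl") with the argument registers untouched. */
        ulReturn = exampleImpl( ulParam );
    }
    else
    {
        /* Unprivileged callers enter the system call through one SVC, run the
         * implementation, then exit through a second SVC before returning. */
        exampleRaiseSvc( exampleSVC_SYSTEM_CALL_ENTER );
        ulReturn = exampleImpl( ulParam );
        exampleRaiseSvc( exampleSVC_SYSTEM_CALL_EXIT );
    }

    return ulReturn;
}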
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
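For orientation, here is how the queue wrappers above are reached in practice. Application code keeps calling the ordinary FreeRTOS queue API; when these wrappers are compiled in, mpu_wrappers.h redirects those calls to the MPU_ functions in this file, so an unprivileged task transparently takes the SVC enter/exit path. The task and queue names below are illustrative only.

#include "FreeRTOS.h"
#include "queue.h"

/* Illustrative unprivileged task: the xQueueSend()/xQueueReceive() calls
 * resolve to MPU_xQueueGenericSend()/MPU_xQueueReceive() when the MPU
 * wrappers are in use. */
void vExampleQueueTask( void * pvParameters )
{
    QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
    uint32_t ulValue = 42U;

    ( void ) xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 10 ) );

    for( ; ; )
    {
        if( xQueueReceive( xQueue, &ulValue, portMAX_DELAY ) == pdPASS )
        {
            /* ulValue was received through the system-call path. */
        }
    }
}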
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
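One detail worth calling out: most wrappers enter through portSVC_SYSTEM_CALL_ENTER, but those whose underlying API takes a fifth argument (xTaskGenericNotify, xTaskGenericNotifyWait, xEventGroupWaitBits, xTimerGenericCommand) enter through portSVC_SYSTEM_CALL_ENTER_1 instead, and MPU_xTimerGenericCommand additionally checks IPSR first so calls made from an interrupt always take the privileged branch. The sketch below is only a reading of that convention, not code from this patch: under AAPCS the first four arguments travel in r0-r3 and a fifth is already on the caller's stack, which the system-call entry presumably needs to account for.

#include <stddef.h>
#include <stdint.h>

#define exampleSVC_SYSTEM_CALL_ENTER      100U   /* Hypothetical values standing in for the portSVC_* constants. */
#define exampleSVC_SYSTEM_CALL_ENTER_1    101U

/* Picks the "enter" SVC number the way the wrappers above choose it statically:
 * calls with more than four arguments carry the extra argument on the stack. */
static uint32_t exampleEnterSvcNumber( size_t xArgumentCount )
{
    return ( xArgumentCount > 4U ) ? exampleSVC_SYSTEM_CALL_ENTER_1 : exampleSVC_SYSTEM_CALL_ENTER;
}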
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. 
&( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. 
*/ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. 
*/ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. */ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. 
*/ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. 
*/ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. 
*/ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 19da9b0ecfe..b9efb07ddc7 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M33" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
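Every MPU_* wrapper added in this file follows the same trampoline shape: preserve r0, read CONTROL, and branch on the nPRIV bit. A privileged caller falls straight through to the corresponding *Impl function; an unprivileged caller raises portSVC_SYSTEM_CALL_ENTER so the SVC handler can move it onto the privileged system-call stack, calls the Impl function, and then raises portSVC_SYSTEM_CALL_EXIT to drop back to its own stack and privilege level. The push {r0}/pop {r0} pair simply keeps the first argument intact while CONTROL is inspected. Wrappers whose target takes five parameters (xTaskGenericNotify, xTaskGenericNotifyWait and, further below, xTimerGenericCommand) raise portSVC_SYSTEM_CALL_ENTER_1 instead, presumably so the enter path can also migrate the fifth, stack-passed argument. The following is a minimal C sketch of that control flow only, not a drop-in replacement: every name containing "Example" is hypothetical, and the real wrappers must remain naked assembly so that r0-r3 and the caller's stacked arguments are untouched when the Impl function is reached.

/* Sketch only: hypothetical wrapper illustrating the privilege check and the
 * SVC enter/exit bracket that the assembly above performs. Relies on the
 * BaseType_t and portSVC_* definitions already provided by the port headers. */
extern BaseType_t MPU_xExampleCallImpl( uint32_t ulArg );          /* hypothetical Impl function */

BaseType_t MPU_xExampleCall( uint32_t ulArg )                      /* hypothetical wrapper */
{
    uint32_t ulControl;
    BaseType_t xReturn;

    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );     /* CONTROL.nPRIV is bit 0. */

    if( ( ulControl & 1U ) == 0U )
    {
        xReturn = MPU_xExampleCallImpl( ulArg );                   /* Privileged: call the Impl directly. */
    }
    else
    {
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
        xReturn = MPU_xExampleCallImpl( ulArg );                   /* Runs on the privileged system-call stack. */
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
    }

    return xReturn;
}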
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
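To application code these trampolines are transparent: when portUSING_MPU_WRAPPERS is 1 and the translation unit is not an API implementation file (the MPU_WRAPPERS_INCLUDED_FROM_API_FILE guard at the top of this file), mpu_wrappers.h maps the public API names onto their MPU_ prefixed counterparts, so a restricted task keeps calling the ordinary API. A brief usage sketch, assuming a queue handle xDataQueue that was created elsewhere during system setup:

/* Usage sketch only; xDataQueue is assumed to exist. */
extern QueueHandle_t xDataQueue;

static void prvRestrictedTask( void * pvParameters )
{
    uint32_t ulReceived;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Resolves to MPU_xQueueReceive above when the MPU wrappers are enabled;
         * the SVC enter/exit bracket is taken only if this task runs unprivileged. */
        if( xQueueReceive( xDataQueue, &ulReceived, portMAX_DELAY ) == pdPASS )
        {
            /* Consume ulReceived here. */
        }
    }
}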
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. 
*/ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. 
*/ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. 
*/ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. 
*/ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. 
*/ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. 
*/ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. */ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 19da9b0ecfe..b9efb07ddc7 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M33" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. 
*/ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h index cc643459770..9545737c550 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M35P" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index c9bad40cf98..12bb5e7c4b9 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M55" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index c45dd21c29e..99f913d3491 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M85" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..867642b5e97 --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S @@ -0,0 +1,1623 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0, r1} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0, r1} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0, r1} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0, r1} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0, r1} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0, r1} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0, r1} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0, r1} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0, r1} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0, r1} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0, r1} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0, r1} + b 
MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0, r1} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne 
MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0, r1} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0, r1} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0, r1} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0, 
r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0, r1} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0, r1} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0, r1} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0, r1} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_xQueuePeek +MPU_xQueuePeek: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0, r1} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0, r1} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0, r1} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0, r1} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0, r1} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0, r1} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc 
#portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0, r1} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0, r1} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0, r1} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0, r1} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0, r1} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0, r1} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0, r1} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + movs r1, #1 + tst r0, r1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0, r1} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0, r1} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0, r1} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0, r1} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0, r1} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0, r1} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
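
Every wrapper above follows the same routing: save r0/r1, read CONTROL, and test bit 0 (nPRIV). A privileged caller branches straight to the corresponding ...Impl function; an unprivileged caller brackets the call with portSVC_SYSTEM_CALL_ENTER and portSVC_SYSTEM_CALL_EXIT SVCs so that vSystemCallEnter/vSystemCallExit can raise and then drop privilege around it. Wrappers whose target takes a fifth, stacked parameter (xTaskGenericNotify, xTaskGenericNotifyWait, xTimerGenericCommand, xEventGroupWaitBits) raise portSVC_SYSTEM_CALL_ENTER_1 instead, and MPU_xTimerGenericCommand additionally checks IPSR so that calls made from an interrupt take the privileged path. The C sketch below restates that logic for readability only; MPU_xExampleCall, CALLER_IS_PRIVILEGED(), CALLED_FROM_ISR() and SVC() are hypothetical placeholders, not part of the port, and the real wrappers stay in assembly so the caller's r0-r3 arguments reach the Impl function unmodified (hence only r0 and r1, which the check clobbers, are pushed and popped).

    #include <stdint.h>

    /* SVC numbers as defined later in this file - kept in sync with portmacro.h. */
    #define portSVC_SYSTEM_CALL_ENTER      4    /* System calls with up to 4 parameters. */
    #define portSVC_SYSTEM_CALL_ENTER_1    5    /* System calls with 5 parameters. */
    #define portSVC_SYSTEM_CALL_EXIT       6

    /* Hypothetical wrapper, equivalent in intent to the assembly stubs above. */
    uint32_t MPU_xExampleCall( uint32_t ulParam1, uint32_t ulParam2 )
    {
        uint32_t ulReturn;

        if( CALLER_IS_PRIVILEGED() || CALLED_FROM_ISR() )   /* CONTROL.nPRIV == 0; the IPSR check exists only in the timer-command wrapper. */
        {
            /* Privileged (or ISR) callers go directly to the implementation. */
            ulReturn = MPU_xExampleCallImpl( ulParam1, ulParam2 );
        }
        else
        {
            SVC( portSVC_SYSTEM_CALL_ENTER );    /* Routed by SVC_Handler to vSystemCallEnter - raises privilege. */
            ulReturn = MPU_xExampleCallImpl( ulParam1, ulParam2 );
            SVC( portSVC_SYSTEM_CALL_EXIT );     /* Routed to vSystemCallExit - drops privilege again. */
        }

        return ulReturn;
    }

The ...Impl symbols referenced here are the PUBWEAK self-branching stubs defined at the end of this file; when the corresponding API is enabled, the strong definitions are expected to come from the mpu_wrappers_v2 C sources.
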
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0, r1} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0, r1} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0, r1} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0, r1} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0, r1} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0, r1} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0, r1} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0, r1} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0, r1} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s index fffed8df619..648ae005010 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s @@ -33,12 +33,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -98,65 +107,99 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. 
*/ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + subs r2, #20 + ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + subs r2, #20 + msr psp, r3 + msr psplim, r4 + msr control, r5 + mov lr, r6 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + subs r2, #32 + ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r2, #48 + ldmia r2!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r2, #32 + ldmia r2!, {r4-r7} /* Restore r4-r7. */ + subs r2, #16 + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - movs r5, #4 /* r5 = 4. */ - str r5, [r2] /* Program RNR = 4. */ - ldmia r3!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write first set of RBAR/RLAR registers. */ - movs r5, #5 /* r5 = 5. */ - str r5, [r2] /* Program RNR = 5. */ - ldmia r3!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write second set of RBAR/RLAR registers. */ - movs r5, #6 /* r5 = 6. */ - str r5, [r2] /* Program RNR = 6. */ - ldmia r3!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. 
*/ - stmia r4!, {r6,r7} /* Write third set of RBAR/RLAR registers. */ - movs r5, #7 /* r5 = 7. */ - str r5, [r2] /* Program RNR = 7. */ - ldmia r3!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -167,6 +210,7 @@ vRestoreContextOfFirstTask: msr psp, r0 /* This is now the new top of stack to use in the task. */ isb bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -199,6 +243,149 @@ vClearInterruptMask: msr PRIMASK, r0 bx lr /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r3} /* LR is now in r3. */ + mov lr, r3 /* Restore LR. */ + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + stmia r2!, {r4-r7} /* Store r4-r7. */ + mov r4, r8 /* r4 = r8. */ + mov r5, r9 /* r5 = r9. */ + mov r6, r10 /* r6 = r10. */ + mov r7, r11 /* r7 = r11. */ + stmia r2!, {r4-r7} /* Store r8-r11. */ + ldmia r3!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */ + stmia r2!, {r4-r7} /* Store the hardware saved context. */ + ldmia r3!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */ + stmia r2!, {r4-r7} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. 
*/ + mrs r5, control /* r5 = CONTROL. */ + mov r6, lr /* r6 = LR. */ + stmia r2!, {r0, r3-r6} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + cpsid i + bl vTaskSwitchContext + cpsie i + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + subs r2, #20 + ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + subs r2, #20 + msr psp, r3 + msr psplim, r4 + msr control, r5 + mov lr, r6 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r4} /* LR is now in r4. */ + mov lr, r4 + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. 
Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + subs r2, #32 + ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r2, #48 + ldmia r2!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r2, #32 + ldmia r2!, {r4-r7} /* Restore r4-r7. */ + subs r2, #16 + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ @@ -216,41 +403,18 @@ PendSV_Handler: bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ + subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ + b select_next_task save_ns_context: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stmia r2!, {r4-r7} /* Store the low registers that are not saved automatically. */ - mov r4, r8 /* r4 = r8. */ - mov r5, r9 /* r5 = r9. */ - mov r6, r10 /* r6 = r10. */ - mov r7, r11 /* r7 = r11. */ - stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #48 /* r2 = r2 - 48. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ @@ -261,7 +425,6 @@ PendSV_Handler: mov r6, r10 /* r6 = r10. */ mov r7, r11 /* r7 = r11. */ stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. 
*/ - #endif /* configENABLE_MPU */ select_next_task: cpsid i @@ -272,68 +435,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r4, =0xe000ed98 /* r4 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r5, #4 /* r5 = 4. */ - str r5, [r4] /* Program RNR = 4. */ - ldmia r1!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write first set of RBAR/RLAR registers. */ - movs r5, #5 /* r5 = 5. */ - str r5, [r4] /* Program RNR = 5. */ - ldmia r1!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write second set of RBAR/RLAR registers. */ - movs r5, #6 /* r5 = 6. */ - str r5, [r4] /* Program RNR = 6. */ - ldmia r1!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write third set of RBAR/RLAR registers. */ - movs r5, #7 /* r5 = 7. */ - str r5, [r4] /* Program RNR = 7. */ - ldmia r1!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. 
*/ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -350,7 +451,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: adds r2, r2, #16 /* Move to the high registers. */ @@ -363,8 +463,45 @@ PendSV_Handler: subs r2, r2, #32 /* Go back to the low registers. */ ldmia r2!, {r4-r7} /* Restore the low registers that are not automatically restored. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + movs r0, #4 + mov r1, lr + tst r0, r1 + beq stack_on_msp + stack_on_psp: + mrs r0, psp + b route_svc + stack_on_msp: + mrs r0, msp + b route_svc + + route_svc: + ldr r2, [r0, #24] + subs r2, #2 + ldrb r3, [r2, #0] + cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq system_call_enter + cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq system_call_enter_1 + cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq system_call_exit + b vPortSVCHandler_C + + system_call_enter: + b vSystemCallEnter + system_call_enter_1: + b vSystemCallEnter_1 + system_call_exit: + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: movs r0, #4 mov r1, lr @@ -375,6 +512,8 @@ SVC_Handler: stacking_used_msp: mrs r0, msp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..867642b5e97 --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S @@ -0,0 +1,1623 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0, r1} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0, r1} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0, r1} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0, r1} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0, r1} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0, r1} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0, r1} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0, r1} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0, r1} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0, r1} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0, r1} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0, r1} + b 
MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0, r1} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne 
MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0, r1} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0, r1} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0, r1} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0, 
r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0, r1} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0, r1} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0, r1} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0, r1} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_xQueuePeek +MPU_xQueuePeek: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0, r1} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0, r1} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0, r1} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0, r1} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0, r1} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0, r1} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc 
#portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0, r1} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0, r1} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0, r1} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0, r1} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0, r1} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0, r1} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0, r1} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
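+     * The IPSR reads as zero at thread level and non-zero inside any exception
+     * handler; handler mode always executes privileged, and raising an SVC from
+     * an interrupt could escalate to a fault, so interrupt callers branch
+     * straight to the privileged path.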
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + movs r1, #1 + tst r0, r1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0, r1} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0, r1} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0, r1} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0, r1} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0, r1} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0, r1} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
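Every wrapper above follows the same pattern: read CONTROL, test the nPRIV bit, and either fall through to the Impl function (privileged caller) or bracket the call between the system-call enter and exit SVCs (unprivileged caller); the SVC_Handler hunks elsewhere in this patch then recover the SVC number from the stacked program counter to route those calls. The C below is only an illustrative sketch of those two tests, not part of the port: the function names are invented for the example and the inline assembly uses GCC syntax rather than the IAR dialect these files are written in.

    #include <stdint.h>

    #define CONTROL_nPRIV_MASK    ( 0x1UL )

    /* Non-zero when CONTROL.nPRIV is set, i.e. the caller is unprivileged.
     * This is the condition that the "mrs r0, control / movs r1, #1 /
     * tst r0, r1" sequence in each wrapper branches on to pick the
     * *_Unpriv path. */
    static inline uint32_t ulCallerIsUnprivileged( void )
    {
        uint32_t ulControl;

        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        return ( ulControl & CONTROL_nPRIV_MASK );
    }

    /* Recover the SVC immediate from a stacked exception frame, mirroring
     * "ldr r2, [r0, #24] / subs r2, #2 / ldrb r3, [r2]": the stacked PC is
     * at byte offset 24, the SVC is the 16-bit Thumb instruction just before
     * it, and the low byte of that instruction holds the number (4, 5 or 6
     * for the system call SVCs defined at the top of this file). */
    static inline uint8_t ucExtractSvcNumber( const uint32_t * pulStackedFrame )
    {
        const uint8_t * pucSvcInstruction =
            ( const uint8_t * ) ( uintptr_t ) ( pulStackedFrame[ 6 ] - 2UL );

        return pucSvcInstruction[ 0 ];
    }

On the privileged path no SVC is raised at all, which is why the *_Priv labels simply pop the scratch registers and branch to the Impl function.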
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0, r1} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0, r1} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0, r1} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0, r1} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0, r1} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0, r1} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0, r1} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0, r1} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0, r1} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
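+ * Each stub below branches to itself, so a wrapper whose real implementation
+ * has been excluded by the configuration spins on the stub rather than
+ * falling through into unrelated code, which makes the misconfiguration
+ * easy to spot in a debugger.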
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s index 62bd3872284..8f77c4dafb1 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -88,63 +97,97 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. 
*/ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + subs r1, #16 + ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + subs r1, #16 + msr psp, r2 + msr psplim, r3 + msr control, r4 + mov lr, r5 + + restore_general_regs_first_task: + subs r1, #32 + ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r1!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r1, #48 + ldmia r1!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r1, #32 + ldmia r1!, {r4-r7} /* Restore r4-r7. */ + subs r1, #16 + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */ - movs r4, #5 /* r4 = 5. */ - str r4, [r2] /* Program RNR = 5. */ - ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */ - movs r4, #6 /* r4 = 6. */ - str r4, [r2] /* Program RNR = 6. */ - ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */ - movs r4, #7 /* r4 = 7. */ - str r4, [r2] /* Program RNR = 7. */ - ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. 
*/ - stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -153,6 +196,7 @@ vRestoreContextOfFirstTask: msr psp, r0 /* This is now the new top of stack to use in the task. */ isb bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -187,23 +231,127 @@ vClearInterruptMask: bx lr /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + stmia r1!, {r4-r7} /* Store r4-r7. */ + mov r4, r8 /* r4 = r8. */ + mov r5, r9 /* r5 = r9. */ + mov r6, r10 /* r6 = r10. */ + mov r7, r11 /* r7 = r11. */ + stmia r1!, {r4-r7} /* Store r8-r11. */ + ldmia r2!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */ + stmia r1!, {r4-r7} /* Store the hardware saved context. */ + ldmia r2!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */ + stmia r1!, {r4-r7} /* Store the hardware saved context. */ + + save_special_regs: + mrs r2, psp /* r2 = PSP. */ + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + mov r5, lr /* r5 = LR. */ + stmia r1!, {r2-r5} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + cpsid i + bl vTaskSwitchContext + cpsie i + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. 
*/ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + subs r1, #16 + ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + subs r1, #16 + msr psp, r2 + msr psplim, r3 + msr control, r4 + mov lr, r5 + + restore_general_regs: + subs r1, #32 + ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r1!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r1, #48 + ldmia r1!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r1, #32 + ldmia r1!, {r4-r7} /* Restore r4-r7. */ + subs r1, #16 + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r0, r0, #44 /* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r0, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmia r0!, {r1-r7} /* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */ - mov r4, r8 /* r4 = r8. */ - mov r5, r9 /* r5 = r9. */ - mov r6, r10 /* r6 = r10. */ - mov r7, r11 /* r7 = r11. */ - stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */ -#else /* configENABLE_MPU */ + subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */ str r0, [r1] /* Save the new top of stack in TCB. */ mrs r2, psplim /* r2 = PSPLIM. */ @@ -214,7 +362,6 @@ PendSV_Handler: mov r6, r10 /* r6 = r10. */ mov r7, r11 /* r7 = r11. 
*/ stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */ -#endif /* configENABLE_MPU */ cpsid i bl vTaskSwitchContext @@ -224,63 +371,6 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */ - movs r4, #5 /* r4 = 5. */ - str r4, [r2] /* Program RNR = 5. */ - ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */ - movs r4, #6 /* r4 = 6. */ - str r4, [r2] /* Program RNR = 6. */ - ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */ - movs r4, #7 /* r4 = 7. */ - str r4, [r2] /* Program RNR = 7. */ - ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - adds r0, r0, #28 /* Move to the high registers. */ - ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */ - mov r8, r4 /* r8 = r4. */ - mov r9, r5 /* r9 = r5. */ - mov r10, r6 /* r10 = r6. */ - mov r11, r7 /* r11 = r7. */ - msr psp, r0 /* Remember the new top of stack for the task. */ - subs r0, r0, #44 /* Move to the starting of the saved context. */ - ldmia r0!, {r1-r7} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ - bx r3 -#else /* configENABLE_MPU */ adds r0, r0, #24 /* Move to the high registers. */ ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */ mov r8, r4 /* r8 = r4. */ @@ -292,9 +382,45 @@ PendSV_Handler: ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ msr psplim, r2 /* Restore the PSPLIM register value for the task. 
*/ bx r3 + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + movs r0, #4 + mov r1, lr + tst r0, r1 + beq stack_on_msp + stack_on_psp: + mrs r0, psp + b route_svc + stack_on_msp: + mrs r0, msp + b route_svc + + route_svc: + ldr r2, [r0, #24] + subs r2, #2 + ldrb r3, [r2, #0] + cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq system_call_enter + cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq system_call_enter_1 + cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq system_call_exit + b vPortSVCHandler_C + + system_call_enter: + b vSystemCallEnter + system_call_enter_1: + b vSystemCallEnter_1 + system_call_exit: + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: movs r0, #4 mov r1, lr @@ -305,6 +431,8 @@ SVC_Handler: stacking_used_msp: mrs r0, msp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
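+ * The fifth parameter travels on the task stack, so the system call entry
+ * has to copy it across to the system call stack as well - hence the
+ * separate SVC number.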
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. 
*/ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. 
*/ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. 
*/ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. 
*/ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. 
*/ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + 
MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
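+/* Every MPU_* wrapper in this file follows the same dispatch pattern as the
+ * one above: read CONTROL, test the nPRIV bit, and either branch straight to
+ * the kernel implementation (privileged caller) or bracket the call with the
+ * portSVC_SYSTEM_CALL_ENTER and portSVC_SYSTEM_CALL_EXIT SVCs so that the
+ * implementation runs with privilege on the system call stack. A rough
+ * pseudo-C sketch of that flow - illustrative only, 'AnyKernelCallImpl' and
+ * 'xReturn' are placeholders and the real privilege/stack transition is done
+ * by the SVC handler via vSystemCallEnter()/vSystemCallExit() - would be:
+ *
+ *     if( ( __get_CONTROL() & 0x1UL ) == 0UL )
+ *     {
+ *         xReturn = AnyKernelCallImpl( ... );    // Caller is already privileged.
+ *     }
+ *     else
+ *     {
+ *         svc( portSVC_SYSTEM_CALL_ENTER );      // Raise privilege, switch to system call stack.
+ *         xReturn = AnyKernelCallImpl( ... );
+ *         svc( portSVC_SYSTEM_CALL_EXIT );       // Drop privilege, return to the task stack.
+ *     }
+ */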
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc 
#portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop 
{r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
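+/* Two different 'enter' SVC numbers are used by the wrappers in this file:
+ * portSVC_SYSTEM_CALL_ENTER for APIs with up to four parameters, and
+ * portSVC_SYSTEM_CALL_ENTER_1 for APIs with five parameters. Under AAPCS the
+ * first four arguments travel in r0-r3 while a fifth argument is placed on
+ * the caller's stack, which is presumably why the five-parameter entry path
+ * is distinguished: the system call entry code also has to account for that
+ * stacked argument. A usage sketch (standard FreeRTOS calls, shown only to
+ * illustrate which entry SVC the corresponding wrapper raises):
+ *
+ *     // Five parameters, fifth lands on the task stack, so the wrapper
+ *     // raises portSVC_SYSTEM_CALL_ENTER_1.
+ *     xTaskGenericNotify( xTask, uxIndex, ulValue, eAction, &ulPreviousValue );
+ *
+ *     // Four parameters, everything fits in r0-r3, so the wrapper raises
+ *     // portSVC_SYSTEM_CALL_ENTER.
+ *     xQueueGenericSend( xQueue, pvItem, xTicksToWait, queueSEND_TO_BACK );
+ */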
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + 
MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + 
pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. 
*/ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. 
*/ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. 
&( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. 
*/ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/ARMv8M/non_secure/portmacrocommon.h +++ b/portable/ARMv8M/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. 
+ */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c index f3c4e5add03..530e38ad3a2 100755 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -249,6 +249,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -285,28 +289,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c index c675afe67b4..a1fc5210e39 100755 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -268,6 +268,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back.
*/ @@ -304,28 +308,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index 2824df05dc0..988be152406 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -1035,7 +1035,7 @@ target_link_libraries(freertos_kernel_port $<$:pico_base_headers> $<$:idf::esp32> PRIVATE - freertos_kernel + freertos_kernel_include $<$:Threads::Threads> "$<$:hardware_clocks;hardware_exception>" $<$:winmm> # Windows library which implements timers diff --git a/portable/Common/mpu_wrappers.c b/portable/Common/mpu_wrappers.c index 92841e1363e..c9951956fea 100644 --- a/portable/Common/mpu_wrappers.c +++ b/portable/Common/mpu_wrappers.c @@ -48,7 +48,7 @@ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*-----------------------------------------------------------*/ -#if ( portUSING_MPU_WRAPPERS == 1 ) +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) BaseType_t MPU_xTaskCreate( TaskFunction_t pvTaskCode, @@ -2537,5 +2537,5 @@ #endif /*-----------------------------------------------------------*/ -#endif /* portUSING_MPU_WRAPPERS == 1 */ +#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) */ /*-----------------------------------------------------------*/ diff --git a/portable/Common/mpu_wrappers_v2.c b/portable/Common/mpu_wrappers_v2.c new file mode 100644 index 00000000000..1e28d8e4eb3 --- /dev/null +++ b/portable/Common/mpu_wrappers_v2.c @@ -0,0 +1,4121 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* + * Implementation of the wrapper functions used to raise the processor privilege + * before calling a standard FreeRTOS API function. + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" +#include "mpu_prototypes.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + #ifndef configPROTECTED_KERNEL_OBJECT_POOL_SIZE + #error configPROTECTED_KERNEL_OBJECT_POOL_SIZE must be defined to maximum number of kernel objects in the application. + #endif + +/** + * @brief Offset added to the index before returning to the user. + * + * If the actual handle is stored at index i, ( i + INDEX_OFFSET ) + * is returned to the user. + */ + #define INDEX_OFFSET 1 + +/** + * @brief Opaque type for a kernel object. + */ + struct OpaqueObject; + typedef struct OpaqueObject * OpaqueObjectHandle_t; + +/** + * @brief Defines kernel object in the kernel object pool. + */ + typedef struct KernelObject + { + OpaqueObjectHandle_t xInternalObjectHandle; + uint32_t ulKernelObjectType; + void * pvKernelObjectData; + } KernelObject_t; + +/** + * @brief Kernel object types. + */ + #define KERNEL_OBJECT_TYPE_INVALID ( 0UL ) + #define KERNEL_OBJECT_TYPE_QUEUE ( 1UL ) + #define KERNEL_OBJECT_TYPE_TASK ( 2UL ) + #define KERNEL_OBJECT_TYPE_STREAM_BUFFER ( 3UL ) + #define KERNEL_OBJECT_TYPE_EVENT_GROUP ( 4UL ) + #define KERNEL_OBJECT_TYPE_TIMER ( 5UL ) + +/** + * @brief Checks whether an external index is valid or not. + */ + #define IS_EXTERNAL_INDEX_VALID( lIndex ) \ + ( ( ( lIndex ) >= INDEX_OFFSET ) && \ + ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE + INDEX_OFFSET ) ) ) + +/** + * @brief Checks whether an internal index is valid or not. + */ + #define IS_INTERNAL_INDEX_VALID( lIndex ) \ + ( ( ( lIndex ) >= 0 ) && \ + ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE ) ) ) + +/** + * @brief Converts an internal index into external. + */ + #define CONVERT_TO_EXTERNAL_INDEX( lIndex ) ( ( lIndex ) + INDEX_OFFSET ) + +/** + * @brief Converts an external index into internal. + */ + #define CONVERT_TO_INTERNAL_INDEX( lIndex ) ( ( lIndex ) - INDEX_OFFSET ) + +/** + * @brief Get the index of a free slot in the kernel object pool. + * + * If a free slot is found, this function marks the slot as + * "not free". + * + * @return Index of a free slot is returned, if a free slot is + * found. Otherwise -1 is returned. + */ + static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Set the given index as free in the kernel object pool. + * + * @param lIndex The index to set as free. + */ + static void MPU_SetIndexFreeInKernelObjectPool( int32_t lIndex ) PRIVILEGED_FUNCTION; + +/** + * @brief Get the index at which a given kernel object is stored. + * + * @param xHandle The given kernel object handle. + * @param ulKernelObjectType The kernel object type. + * + * @return Index at which the kernel object is stored if it is a valid + * handle, -1 otherwise. 
+ */ + static int32_t MPU_GetIndexForHandle( OpaqueObjectHandle_t xHandle, + uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION; + +/** + * @brief Store the given kernel object handle at the given index in + * the kernel object pool. + * + * @param lIndex Index to store the given handle at. + * @param xHandle Kernel object handle to store. + * @param pvKernelObjectData The data associated with the kernel object. + * Currently, only used for timer objects to store timer callback. + * @param ulKernelObjectType The kernel object type. + */ + static void MPU_StoreHandleAndDataAtIndex( int32_t lIndex, + OpaqueObjectHandle_t xHandle, + void * pvKernelObjectData, + uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION; + +/** + * @brief Get the kernel object handle at the given index from + * the kernel object pool. + * + * @param lIndex Index at which to get the kernel object handle. + * @param ulKernelObjectType The kernel object type. + * + * @return The kernel object handle at the index. + */ + static OpaqueObjectHandle_t MPU_GetHandleAtIndex( int32_t lIndex, + uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION; + + #if ( configUSE_TIMERS == 1 ) + +/** + * @brief The function registered as callback for all the timers. + * + * We intercept all the timer callbacks so that we can call application + * callbacks with opaque handle. + * + * @param xInternalHandle The internal timer handle. + */ + static void MPU_TimerCallback( TimerHandle_t xInternalHandle ) PRIVILEGED_FUNCTION; + + #endif /* #if ( configUSE_TIMERS == 1 ) */ + +/* + * Wrappers to keep all the casting in one place. + */ + #define MPU_StoreQueueHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_GetQueueHandleAtIndex( lIndex ) ( QueueHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE ) + + #if ( configUSE_QUEUE_SETS == 1 ) + #define MPU_StoreQueueSetHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_GetQueueSetHandleAtIndex( lIndex ) ( QueueSetHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_StoreQueueSetMemberHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_GetQueueSetMemberHandleAtIndex( lIndex ) ( QueueSetMemberHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_GetIndexForQueueSetMemberHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_QUEUE ) + #endif + +/* + * Wrappers to keep all the casting in one place for Task APIs. + */ + #define MPU_StoreTaskHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_TASK ) + #define MPU_GetTaskHandleAtIndex( lIndex ) ( TaskHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TASK ) + #define MPU_GetIndexForTaskHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TASK ) + +/* + * Wrappers to keep all the casting in one place for Event Group APIs. 
+ */ + #define MPU_StoreEventGroupHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_EVENT_GROUP ) + #define MPU_GetEventGroupHandleAtIndex( lIndex ) ( EventGroupHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_EVENT_GROUP ) + #define MPU_GetIndexForEventGroupHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_EVENT_GROUP ) + +/* + * Wrappers to keep all the casting in one place for Stream Buffer APIs. + */ + #define MPU_StoreStreamBufferHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_STREAM_BUFFER ) + #define MPU_GetStreamBufferHandleAtIndex( lIndex ) ( StreamBufferHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_STREAM_BUFFER ) + #define MPU_GetIndexForStreamBufferHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_STREAM_BUFFER ) + + #if ( configUSE_TIMERS == 1 ) + +/* + * Wrappers to keep all the casting in one place for Timer APIs. + */ + #define MPU_StoreTimerHandleAtIndex( lIndex, xHandle, pxApplicationCallback ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, ( void * ) pxApplicationCallback, KERNEL_OBJECT_TYPE_TIMER ) + #define MPU_GetTimerHandleAtIndex( lIndex ) ( TimerHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TIMER ) + #define MPU_GetIndexForTimerHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TIMER ) + + #endif /* #if ( configUSE_TIMERS == 1 ) */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Kernel object pool. + */ + PRIVILEGED_DATA static KernelObject_t xKernelObjectPool[ configPROTECTED_KERNEL_OBJECT_POOL_SIZE ] = { NULL }; +/*-----------------------------------------------------------*/ + + static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) /* PRIVILEGED_FUNCTION */ + { + int32_t i, lFreeIndex = -1; + + /* This function is called only from resource create APIs + * which are not supposed to be called from ISRs. Therefore, + * we only need to suspend the scheduler and do not require + * critical section. */ + vTaskSuspendAll(); + { + for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ ) + { + if( xKernelObjectPool[ i ].xInternalObjectHandle == NULL ) + { + /* Mark this index as not free. 
*/ + xKernelObjectPool[ i ].xInternalObjectHandle = ( OpaqueObjectHandle_t ) ( ~0 ); + lFreeIndex = i; + break; + } + } + } + xTaskResumeAll(); + + return lFreeIndex; + } +/*-----------------------------------------------------------*/ + + static void MPU_SetIndexFreeInKernelObjectPool( int32_t lIndex ) /* PRIVILEGED_FUNCTION */ + { + configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE ); + + taskENTER_CRITICAL(); + { + xKernelObjectPool[ lIndex ].xInternalObjectHandle = NULL; + xKernelObjectPool[ lIndex ].ulKernelObjectType = KERNEL_OBJECT_TYPE_INVALID; + xKernelObjectPool[ lIndex ].pvKernelObjectData = NULL; + } + taskEXIT_CRITICAL(); + } +/*-----------------------------------------------------------*/ + + static int32_t MPU_GetIndexForHandle( OpaqueObjectHandle_t xHandle, + uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */ + { + int32_t i, lIndex = -1; + + configASSERT( xHandle != NULL ); + + for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ ) + { + if( ( xKernelObjectPool[ i ].xInternalObjectHandle == xHandle ) && + ( xKernelObjectPool[ i ].ulKernelObjectType == ulKernelObjectType ) ) + { + lIndex = i; + break; + } + } + + return lIndex; + } +/*-----------------------------------------------------------*/ + + static void MPU_StoreHandleAndDataAtIndex( int32_t lIndex, + OpaqueObjectHandle_t xHandle, + void * pvKernelObjectData, + uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */ + { + configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE ); + xKernelObjectPool[ lIndex ].xInternalObjectHandle = xHandle; + xKernelObjectPool[ lIndex ].ulKernelObjectType = ulKernelObjectType; + xKernelObjectPool[ lIndex ].pvKernelObjectData = pvKernelObjectData; + } +/*-----------------------------------------------------------*/ + + static OpaqueObjectHandle_t MPU_GetHandleAtIndex( int32_t lIndex, + uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */ + { + configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE ); + configASSERT( xKernelObjectPool[ lIndex ].ulKernelObjectType == ulKernelObjectType ); + return xKernelObjectPool[ lIndex ].xInternalObjectHandle; + } +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + static void MPU_TimerCallback( TimerHandle_t xInternalHandle ) /* PRIVILEGED_FUNCTION */ + { + int32_t i, lIndex = -1; + TimerHandle_t xExternalHandle = NULL; + TimerCallbackFunction_t pxApplicationCallBack = NULL; + + /* Coming from the timer task and therefore, should be valid. */ + configASSERT( xInternalHandle != NULL ); + + for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ ) + { + if( ( ( TimerHandle_t ) xKernelObjectPool[ i ].xInternalObjectHandle == xInternalHandle ) && + ( xKernelObjectPool[ i ].ulKernelObjectType == KERNEL_OBJECT_TYPE_TIMER ) ) + { + lIndex = i; + break; + } + } + + configASSERT( lIndex != -1 ); + xExternalHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + + pxApplicationCallBack = ( TimerCallbackFunction_t ) xKernelObjectPool[ lIndex ].pvKernelObjectData; + pxApplicationCallBack( xExternalHandle ); + } + + #endif /* #if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for tasks APIs. 
*/ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskDelayUntil == 1 ) + + BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime, + TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime, + TickType_t xTimeIncrement ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + BaseType_t xIsPreviousWakeTimeAccessible = pdFALSE; + + xIsPreviousWakeTimeAccessible = xPortIsAuthorizedToAccessBuffer( pxPreviousWakeTime, + sizeof( TickType_t ), + ( tskMPU_WRITE_PERMISSION | tskMPU_READ_PERMISSION ) ); + + if( xIsPreviousWakeTimeAccessible == pdTRUE ) + { + xReturn = xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ); + } + + return xReturn; + } + + #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskAbortDelay == 1 ) + + BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( xTask == NULL ) + { + xReturn = xTaskAbortDelay( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskAbortDelay( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskDelay == 1 ) + + void MPU_vTaskDelayImpl( TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION; + + void MPU_vTaskDelayImpl( TickType_t xTicksToDelay ) /* PRIVILEGED_FUNCTION */ + { + vTaskDelay( xTicksToDelay ); + } + + #endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t MPU_uxTaskPriorityGetImpl( const TaskHandle_t pxTask ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTaskPriorityGetImpl( const TaskHandle_t pxTask ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = configMAX_PRIORITIES; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( pxTask == NULL ) + { + uxReturn = uxTaskPriorityGet( pxTask ); + } + else + { + lIndex = ( int32_t ) pxTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + uxReturn = uxTaskPriorityGet( xInternalTaskHandle ); + } + } + } + + return uxReturn; + } + + #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_eTaskGetState == 1 ) + + eTaskState MPU_eTaskGetStateImpl( TaskHandle_t pxTask ) PRIVILEGED_FUNCTION; + + eTaskState MPU_eTaskGetStateImpl( TaskHandle_t pxTask ) /* PRIVILEGED_FUNCTION */ + { + eTaskState eReturn = eInvalid; + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( pxTask == NULL ) + { + eReturn = eTaskGetState( pxTask ); + } + else + { + lIndex = ( int32_t ) pxTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + eReturn = 
eTaskGetState( xInternalTaskHandle ); + } + } + } + + return eReturn; + } + + #endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TRACE_FACILITY == 1 ) + + void MPU_vTaskGetInfoImpl( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) PRIVILEGED_FUNCTION; + + void MPU_vTaskGetInfoImpl( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + BaseType_t xIsTaskStatusWriteable = pdFALSE; + + xIsTaskStatusWriteable = xPortIsAuthorizedToAccessBuffer( pxTaskStatus, + sizeof( TaskStatus_t ), + tskMPU_WRITE_PERMISSION ); + + if( xIsTaskStatusWriteable == pdTRUE ) + { + if( xTask == NULL ) + { + vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskGetInfo( xInternalTaskHandle, pxTaskStatus, xGetFreeStackSpace, eState ); + } + } + } + } + } + + #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + + TaskHandle_t MPU_xTaskGetIdleTaskHandleImpl( void ) PRIVILEGED_FUNCTION; + + TaskHandle_t MPU_xTaskGetIdleTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xIdleTaskHandle = NULL; + + xIdleTaskHandle = xTaskGetIdleTaskHandle(); + + return xIdleTaskHandle; + } + + #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskSuspend == 1 ) + + void MPU_vTaskSuspendImpl( TaskHandle_t pxTaskToSuspend ) PRIVILEGED_FUNCTION; + + void MPU_vTaskSuspendImpl( TaskHandle_t pxTaskToSuspend ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( pxTaskToSuspend == NULL ) + { + vTaskSuspend( pxTaskToSuspend ); + } + else + { + /* After the scheduler starts, only privileged tasks are allowed + * to suspend other tasks. 
*/ + if( ( xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED ) || ( portIS_TASK_PRIVILEGED() == pdTRUE ) ) + { + lIndex = ( int32_t ) pxTaskToSuspend; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskSuspend( xInternalTaskHandle ); + } + } + } + } + } + + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskSuspend == 1 ) + + void MPU_vTaskResumeImpl( TaskHandle_t pxTaskToResume ) PRIVILEGED_FUNCTION; + + void MPU_vTaskResumeImpl( TaskHandle_t pxTaskToResume ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = ( int32_t ) pxTaskToResume; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskResume( xInternalTaskHandle ); + } + } + } + + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + + TickType_t MPU_xTaskGetTickCountImpl( void ) PRIVILEGED_FUNCTION; + + TickType_t MPU_xTaskGetTickCountImpl( void ) /* PRIVILEGED_FUNCTION */ + { + TickType_t xReturn; + + xReturn = xTaskGetTickCount(); + + return xReturn; + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxTaskGetNumberOfTasksImpl( void ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTaskGetNumberOfTasksImpl( void ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn; + + uxReturn = uxTaskGetNumberOfTasks(); + + return uxReturn; + } +/*-----------------------------------------------------------*/ + + char * MPU_pcTaskGetNameImpl( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; + + char * MPU_pcTaskGetNameImpl( TaskHandle_t xTaskToQuery ) /* PRIVILEGED_FUNCTION */ + { + char * pcReturn = NULL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTaskToQuery == NULL ) + { + pcReturn = pcTaskGetName( xTaskToQuery ); + } + else + { + lIndex = ( int32_t ) xTaskToQuery; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + pcReturn = pcTaskGetName( xInternalTaskHandle ); + } + } + } + + return pcReturn; + } +/*-----------------------------------------------------------*/ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounterImpl( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounterImpl( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + configRUN_TIME_COUNTER_TYPE xReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = ulTaskGetRunTimeCounter( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = ulTaskGetRunTimeCounter( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE 
MPU_ulTaskGetRunTimePercentImpl( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercentImpl( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + configRUN_TIME_COUNTER_TYPE xReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = ulTaskGetRunTimePercent( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = ulTaskGetRunTimePercent( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercentImpl( void ) PRIVILEGED_FUNCTION; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercentImpl( void ) /* PRIVILEGED_FUNCTION */ + { + configRUN_TIME_COUNTER_TYPE xReturn; + + xReturn = ulTaskGetIdleRunTimePercent(); + + return xReturn; + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounterImpl( void ) PRIVILEGED_FUNCTION; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounterImpl( void ) /* PRIVILEGED_FUNCTION */ + { + configRUN_TIME_COUNTER_TYPE xReturn; + + xReturn = ulTaskGetIdleRunTimeCounter(); + + return xReturn; + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + void MPU_vTaskSetApplicationTaskTagImpl( TaskHandle_t xTask, + TaskHookFunction_t pxTagValue ) PRIVILEGED_FUNCTION; + + void MPU_vTaskSetApplicationTaskTagImpl( TaskHandle_t xTask, + TaskHookFunction_t pxTagValue ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( xTask == NULL ) + { + vTaskSetApplicationTaskTag( xTask, pxTagValue ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskSetApplicationTaskTag( xInternalTaskHandle, pxTagValue ); + } + } + } + } + + #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t MPU_xTaskGetApplicationTaskTagImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + TaskHookFunction_t MPU_xTaskGetApplicationTaskTagImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + TaskHookFunction_t xReturn = NULL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = xTaskGetApplicationTaskTag( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = 
xTaskGetApplicationTaskTag( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void MPU_vTaskSetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) PRIVILEGED_FUNCTION; + + void MPU_vTaskSetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTaskToSet == NULL ) + { + vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue ); + } + else + { + lIndex = ( int32_t ) xTaskToSet; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskSetThreadLocalStoragePointer( xInternalTaskHandle, xIndex, pvValue ); + } + } + } + } + + #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void * MPU_pvTaskGetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) PRIVILEGED_FUNCTION; + + void * MPU_pvTaskGetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* PRIVILEGED_FUNCTION */ + { + void * pvReturn = NULL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTaskToQuery == NULL ) + { + pvReturn = pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex ); + } + else + { + lIndex = ( int32_t ) xTaskToQuery; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + pvReturn = pvTaskGetThreadLocalStoragePointer( xInternalTaskHandle, xIndex ); + } + } + } + + return pvReturn; + } + + #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t MPU_uxTaskGetSystemStateImpl( TaskStatus_t * pxTaskStatusArray, + UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTaskGetSystemStateImpl( TaskStatus_t * pxTaskStatusArray, + UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = pdFALSE; + UBaseType_t xIsTaskStatusArrayWriteable = pdFALSE; + UBaseType_t xIsTotalRunTimeWriteable = pdFALSE; + + xIsTaskStatusArrayWriteable = xPortIsAuthorizedToAccessBuffer( pxTaskStatusArray, + sizeof( TaskStatus_t ) * uxArraySize, + tskMPU_WRITE_PERMISSION ); + + if( pulTotalRunTime != NULL ) + { + xIsTotalRunTimeWriteable = xPortIsAuthorizedToAccessBuffer( pulTotalRunTime, + sizeof( configRUN_TIME_COUNTER_TYPE ), + tskMPU_WRITE_PERMISSION ); + } + + if( ( xIsTaskStatusArrayWriteable == pdTRUE ) && + ( ( pulTotalRunTime == NULL ) || ( xIsTotalRunTimeWriteable == pdTRUE ) ) ) + { + uxReturn = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime ); + } + + return uxReturn; + } + + #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + + UBaseType_t MPU_uxTaskGetStackHighWaterMarkImpl( 
TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTaskGetStackHighWaterMarkImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + uxReturn = uxTaskGetStackHighWaterMark( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + uxReturn = uxTaskGetStackHighWaterMark( xInternalTaskHandle ); + } + } + } + + return uxReturn; + } + + #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + + configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2Impl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2Impl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + configSTACK_DEPTH_TYPE uxReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + uxReturn = uxTaskGetStackHighWaterMark2( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + uxReturn = uxTaskGetStackHighWaterMark2( xInternalTaskHandle ); + } + } + } + + return uxReturn; + } + + #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t MPU_xTaskGetCurrentTaskHandleImpl( void ) PRIVILEGED_FUNCTION; + + TaskHandle_t MPU_xTaskGetCurrentTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + TaskHandle_t xExternalTaskHandle = NULL; + int32_t lIndex; + + xInternalTaskHandle = xTaskGetCurrentTaskHandle(); + + if( xInternalTaskHandle != NULL ) + { + lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle ); + + if( lIndex != -1 ) + { + xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + + return xExternalTaskHandle; + } + + #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskGetSchedulerState == 1 ) + + BaseType_t MPU_xTaskGetSchedulerStateImpl( void ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskGetSchedulerStateImpl( void ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = taskSCHEDULER_NOT_STARTED; + + xReturn = xTaskGetSchedulerState(); + + return xReturn; + } + + #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + + void MPU_vTaskSetTimeOutStateImpl( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + + void MPU_vTaskSetTimeOutStateImpl( TimeOut_t * const pxTimeOut ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xIsTimeOutWriteable = pdFALSE; + + xIsTimeOutWriteable = xPortIsAuthorizedToAccessBuffer( pxTimeOut, + sizeof( TimeOut_t ), + tskMPU_WRITE_PERMISSION ); + + if( xIsTimeOutWriteable == pdTRUE ) + { + vTaskSetTimeOutState( pxTimeOut ); + } + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xTaskCheckForTimeOutImpl( TimeOut_t * const 
pxTimeOut, + TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskCheckForTimeOutImpl( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + BaseType_t xIsTimeOutWriteable = pdFALSE; + BaseType_t xIsTicksToWaitWriteable = pdFALSE; + + xIsTimeOutWriteable = xPortIsAuthorizedToAccessBuffer( pxTimeOut, + sizeof( TimeOut_t ), + tskMPU_WRITE_PERMISSION ); + xIsTicksToWaitWriteable = xPortIsAuthorizedToAccessBuffer( pxTicksToWait, + sizeof( TickType_t ), + tskMPU_WRITE_PERMISSION ); + + if( ( xIsTimeOutWriteable == pdTRUE ) && ( xIsTicksToWaitWriteable == pdTRUE ) ) + { + xReturn = xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait ); + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyImpl( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskGenericNotifyImpl( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + BaseType_t xIsPreviousNotificationValueWriteable = pdFALSE; + + if( pulPreviousNotificationValue != NULL ) + { + xIsPreviousNotificationValueWriteable = xPortIsAuthorizedToAccessBuffer( pulPreviousNotificationValue, + sizeof( uint32_t ), + tskMPU_WRITE_PERMISSION ); + } + + if( ( pulPreviousNotificationValue == NULL ) || ( xIsPreviousNotificationValueWriteable == pdTRUE ) ) + { + lIndex = ( int32_t ) xTaskToNotify; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGenericNotify( xInternalTaskHandle, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyWaitImpl( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskGenericNotifyWaitImpl( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + BaseType_t xIsNotificationValueWritable = pdFALSE; + + if( pulNotificationValue != NULL ) + { + xIsNotificationValueWritable = xPortIsAuthorizedToAccessBuffer( pulNotificationValue, + sizeof( uint32_t ), + tskMPU_WRITE_PERMISSION ); + } + + if( ( pulNotificationValue == NULL ) || ( xIsNotificationValueWritable == pdTRUE ) ) + { + xReturn = xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ); + } + + return xReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t MPU_ulTaskGenericNotifyTakeImpl( UBaseType_t 
uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + uint32_t MPU_ulTaskGenericNotifyTakeImpl( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulReturn; + + ulReturn = ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait ); + + return ulReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyStateClearImpl( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskGenericNotifyStateClearImpl( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = xTaskGenericNotifyStateClear( xTask, uxIndexToClear ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGenericNotifyStateClear( xInternalTaskHandle, uxIndexToClear ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t MPU_ulTaskGenericNotifyValueClearImpl( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION; + + uint32_t MPU_ulTaskGenericNotifyValueClearImpl( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + ulReturn = ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + ulReturn = ulTaskGenericNotifyValueClear( xInternalTaskHandle, uxIndexToClear, ulBitsToClear ); + } + } + } + + return ulReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +/* Privileged only wrappers for Task APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t MPU_xTaskCreate( TaskFunction_t pvTaskCode, + const char * const pcName, + uint16_t usStackDepth, + void * pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + /* xTaskCreate() can only be used to create privileged tasks in MPU port. 
*/ + if( ( uxPriority & portPRIVILEGE_BIT ) != 0 ) + { + xReturn = xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, &( xInternalTaskHandle ) ); + + if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) ) + { + MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle ); + + if( pxCreatedTask != NULL ) + { + *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + } + + return xReturn; + } + + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xExternalTaskHandle = NULL; + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalTaskHandle = xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer ); + + if( xInternalTaskHandle != NULL ) + { + MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle ); + xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalTaskHandle; + } + + #endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskDelete == 1 ) + + void MPU_vTaskDelete( TaskHandle_t pxTaskToDelete ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( pxTaskToDelete == NULL ) + { + xInternalTaskHandle = xTaskGetCurrentTaskHandle(); + lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle ); + + vTaskDelete( xInternalTaskHandle ); + + if( lIndex != -1 ) + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + else + { + lIndex = ( int32_t ) pxTaskToDelete; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskDelete( xInternalTaskHandle ); + MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + } + } + } + } + + #endif /* #if ( INCLUDE_vTaskDelete == 1 ) */ +/*-----------------------------------------------------------*/ + + + #if ( INCLUDE_vTaskPrioritySet == 1 ) + + void MPU_vTaskPrioritySet( TaskHandle_t pxTask, + UBaseType_t uxNewPriority ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( pxTask == NULL ) + { + vTaskPrioritySet( pxTask, uxNewPriority ); + } + else + { + lIndex = ( int32_t ) pxTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskPrioritySet( xInternalTaskHandle, uxNewPriority ); + } + } + } + } + + #endif /* if ( INCLUDE_vTaskPrioritySet == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskGetHandle == 1 ) + + TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + TaskHandle_t 
xExternalTaskHandle = NULL; + int32_t lIndex; + + xInternalTaskHandle = xTaskGetHandle( pcNameToQuery ); + + if( xInternalTaskHandle != NULL ) + { + lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle ); + + if( lIndex != -1 ) + { + xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + + return xExternalTaskHandle; + } + + #endif /* if ( INCLUDE_xTaskGetHandle == 1 ) */ +/*-----------------------------------------------------------*/ + + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, + void * pvParameter ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = xTaskCallApplicationTaskHook( xTask, pvParameter ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskCallApplicationTaskHook( xInternalTaskHandle, pvParameter ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xReturn = xTaskCreateRestricted( pxTaskDefinition, &( xInternalTaskHandle ) ); + + if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) ) + { + MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle ); + + if( pxCreatedTask != NULL ) + { + *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xReturn; + } + + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xReturn = xTaskCreateRestrictedStatic( pxTaskDefinition, &( xInternalTaskHandle ) ); + + if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) ) + { + MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle ); + + if( pxCreatedTask != NULL ) + { + *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xReturn; + } + + #endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + void MPU_vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, + const MemoryRegion_t * const xRegions ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( xTaskToModify == NULL ) + { + vTaskAllocateMPURegions( xTaskToModify, xRegions ); + } + else + { + lIndex = ( int32_t ) xTaskToModify; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + 
xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskAllocateMPURegions( xInternalTaskHandle, xRegions ); + } + } + } + } +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xTaskGetStaticBuffers( TaskHandle_t xTask, + StackType_t ** ppuxStackBuffer, + StaticTask_t ** ppxTaskBuffer ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + BaseType_t xReturn = pdFALSE; + + if( xTask == NULL ) + { + xInternalTaskHandle = xTaskGetCurrentTaskHandle(); + xReturn = xTaskGetStaticBuffers( xInternalTaskHandle, ppuxStackBuffer, ppxTaskBuffer ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGetStaticBuffers( xInternalTaskHandle, ppuxStackBuffer, ppxTaskBuffer ); + } + } + } + + return xReturn; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t MPU_uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = configMAX_PRIORITIES; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + uxReturn = uxTaskPriorityGetFromISR( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + uxReturn = uxTaskPriorityGetFromISR( xInternalTaskHandle ); + } + } + } + + return uxReturn; + } + + #endif /* #if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) + + BaseType_t MPU_xTaskResumeFromISR( TaskHandle_t xTaskToResume ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = ( int32_t ) xTaskToResume; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskResumeFromISR( xInternalTaskHandle ); + } + } + + return xReturn; + } + + #endif /* #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )*/ +/*---------------------------------------------------------------------------------------*/ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t MPU_xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + TaskHookFunction_t xReturn = NULL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = xTaskGetApplicationTaskTagFromISR( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGetApplicationTaskTagFromISR( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* #if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ 
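/* Illustrative sketch - not part of the kernel sources in this patch. It shows how the
 * opaque-handle scheme implemented by the wrappers above behaves from the caller's side:
 * MPU_xTaskCreate() stores the real TaskHandle_t in xKernelObjectPool and hands back
 * ( pool index + INDEX_OFFSET ) cast to TaskHandle_t, and a wrapper such as
 * MPU_vTaskDelete() validates that index with IS_EXTERNAL_INDEX_VALID() and converts it
 * back with CONVERT_TO_INTERNAL_INDEX() before touching the real kernel object. The task
 * function, name, stack depth and priority below are hypothetical example values. */

#include "FreeRTOS.h"
#include "task.h"
#include "mpu_prototypes.h" /* Prototypes of the MPU_ wrapper functions. */

static void prvExampleTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}

void vExampleOpaqueHandleRoundTrip( void )
{
    TaskHandle_t xOpaqueHandle = NULL;

    /* xTaskCreate() can only create privileged tasks in the MPU port, so
     * portPRIVILEGE_BIT is OR-ed into the requested priority. On success the
     * handle written back is an external index, not a pointer into kernel memory. */
    if( MPU_xTaskCreate( prvExampleTask,
                         "Example",
                         configMINIMAL_STACK_SIZE,
                         NULL,
                         ( tskIDLE_PRIORITY + 1 ) | portPRIVILEGE_BIT,
                         &xOpaqueHandle ) == pdPASS )
    {
        /* The wrapper resolves the opaque handle back to the internal TaskHandle_t,
         * deletes the task and marks the pool slot free again. */
        MPU_vTaskDelete( xOpaqueHandle );
    }
}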
+/*---------------------------------------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue, + BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = ( int32_t ) xTaskToNotify; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGenericNotifyFromISR( xInternalTaskHandle, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + + #endif /* #if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*---------------------------------------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + void MPU_vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = ( int32_t ) xTaskToNotify; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskGenericNotifyGiveFromISR( xInternalTaskHandle, uxIndexToNotify, pxHigherPriorityTaskWoken ); + } + } + } + #endif /*#if ( configUSE_TASK_NOTIFICATIONS == 1 )*/ +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for queue APIs. 
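+ * Each wrapper below validates the index-based handle supplied by the
+ * application ( IS_EXTERNAL_INDEX_VALID ), converts it back to the real
+ * kernel object handle ( MPU_GetQueueHandleAtIndex ), and, where the caller
+ * passes a data buffer, checks the task's access rights to that buffer with
+ * xPortIsAuthorizedToAccessBuffer before calling the underlying queue API.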
*/ +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueGenericSendImpl( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueGenericSendImpl( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + BaseType_t xCopyPosition ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + BaseType_t xIsItemToQueueReadable = pdFALSE; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + if( pvItemToQueue != NULL ) + { + xIsItemToQueueReadable = xPortIsAuthorizedToAccessBuffer( pvItemToQueue, + uxQueueGetQueueItemSize( xInternalQueueHandle ), + tskMPU_READ_PERMISSION ); + } + + if( ( pvItemToQueue == NULL ) || ( xIsItemToQueueReadable == pdTRUE ) ) + { + xReturn = xQueueGenericSend( xInternalQueueHandle, pvItemToQueue, xTicksToWait, xCopyPosition ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxQueueMessagesWaitingImpl( const QueueHandle_t pxQueue ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxQueueMessagesWaitingImpl( const QueueHandle_t pxQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + UBaseType_t uxReturn = 0; + + lIndex = ( int32_t ) pxQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + uxReturn = uxQueueMessagesWaiting( xInternalQueueHandle ); + } + } + + return uxReturn; + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxQueueSpacesAvailableImpl( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxQueueSpacesAvailableImpl( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + UBaseType_t uxReturn = 0; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + uxReturn = uxQueueSpacesAvailable( xInternalQueueHandle ); + } + } + + return uxReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t pxQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t pxQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + BaseType_t xIsReceiveBufferWritable = pdFALSE; + + lIndex = ( int32_t ) pxQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xIsReceiveBufferWritable = xPortIsAuthorizedToAccessBuffer( pvBuffer, + uxQueueGetQueueItemSize( xInternalQueueHandle ), + tskMPU_WRITE_PERMISSION ); + + if( xIsReceiveBufferWritable == pdTRUE ) + { + xReturn = xQueueReceive( xInternalQueueHandle, pvBuffer, 
xTicksToWait ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueuePeekImpl( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueuePeekImpl( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + BaseType_t xIsReceiveBufferWritable = pdFALSE; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xIsReceiveBufferWritable = xPortIsAuthorizedToAccessBuffer( pvBuffer, + uxQueueGetQueueItemSize( xInternalQueueHandle ), + tskMPU_WRITE_PERMISSION ); + + if( xIsReceiveBufferWritable == pdTRUE ) + { + xReturn = xQueuePeek( xInternalQueueHandle, pvBuffer, xTicksToWait ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueSemaphoreTakeImpl( QueueHandle_t xQueue, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueSemaphoreTakeImpl( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueSemaphoreTake( xInternalQueueHandle, xTicksToWait ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t MPU_xQueueGetMutexHolderImpl( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; + + TaskHandle_t MPU_xQueueGetMutexHolderImpl( QueueHandle_t xSemaphore ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xMutexHolderTaskInternalHandle = NULL; + TaskHandle_t xMutexHolderTaskExternalHandle = NULL; + int32_t lIndex, lMutexHolderTaskIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + + lIndex = ( int32_t ) xSemaphore; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xMutexHolderTaskInternalHandle = xQueueGetMutexHolder( xInternalQueueHandle ); + + if( xMutexHolderTaskInternalHandle != NULL ) + { + lMutexHolderTaskIndex = MPU_GetIndexForTaskHandle( xMutexHolderTaskInternalHandle ); + + if( lMutexHolderTaskIndex != -1 ) + { + xMutexHolderTaskExternalHandle = ( TaskHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lMutexHolderTaskIndex ) ); + } + } + } + } + + return xMutexHolderTaskExternalHandle; + } + + #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t MPU_xQueueTakeMutexRecursiveImpl( QueueHandle_t xMutex, + TickType_t xBlockTime ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueTakeMutexRecursiveImpl( QueueHandle_t xMutex, + TickType_t xBlockTime ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + 
lIndex = ( int32_t ) xMutex; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueTakeMutexRecursive( xInternalQueueHandle, xBlockTime ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t MPU_xQueueGiveMutexRecursiveImpl( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueGiveMutexRecursiveImpl( QueueHandle_t xMutex ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xMutex; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGiveMutexRecursive( xInternalQueueHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t MPU_xQueueSelectFromSetImpl( QueueSetHandle_t xQueueSet, + TickType_t xBlockTimeTicks ) PRIVILEGED_FUNCTION; + + QueueSetMemberHandle_t MPU_xQueueSelectFromSetImpl( QueueSetHandle_t xQueueSet, + TickType_t xBlockTimeTicks ) /* PRIVILEGED_FUNCTION */ + { + QueueSetHandle_t xInternalQueueSetHandle = NULL; + QueueSetMemberHandle_t xSelectedMemberInternal = NULL; + QueueSetMemberHandle_t xSelectedMemberExternal = NULL; + int32_t lIndexQueueSet, lIndexSelectedMember; + + lIndexQueueSet = ( int32_t ) xQueueSet; + + if( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) + { + xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) ); + + if( xInternalQueueSetHandle != NULL ) + { + xSelectedMemberInternal = xQueueSelectFromSet( xInternalQueueSetHandle, xBlockTimeTicks ); + + if( xSelectedMemberInternal != NULL ) + { + lIndexSelectedMember = MPU_GetIndexForQueueSetMemberHandle( xSelectedMemberInternal ); + + if( lIndexSelectedMember != -1 ) + { + xSelectedMemberExternal = ( QueueSetMemberHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lIndexSelectedMember ) ); + } + } + } + } + + return xSelectedMemberExternal; + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t MPU_xQueueAddToSetImpl( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueAddToSetImpl( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + QueueSetMemberHandle_t xInternalQueueSetMemberHandle = NULL; + QueueSetHandle_t xInternalQueueSetHandle; + int32_t lIndexQueueSet, lIndexQueueSetMember; + + lIndexQueueSet = ( int32_t ) xQueueSet; + lIndexQueueSetMember = ( int32_t ) xQueueOrSemaphore; + + if( ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) && + ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSetMember ) != pdFALSE ) ) + { + xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) ); + xInternalQueueSetMemberHandle = MPU_GetQueueSetMemberHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) ); + + if( ( 
xInternalQueueSetHandle != NULL ) && ( xInternalQueueSetMemberHandle != NULL ) )
+            {
+                xReturn = xQueueAddToSet( xInternalQueueSetMemberHandle, xInternalQueueSetHandle );
+            }
+        }
+
+        return xReturn;
+    }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if configQUEUE_REGISTRY_SIZE > 0
+
+        void MPU_vQueueAddToRegistryImpl( QueueHandle_t xQueue,
+                                          const char * pcName ) PRIVILEGED_FUNCTION;
+
+        void MPU_vQueueAddToRegistryImpl( QueueHandle_t xQueue,
+                                          const char * pcName ) /* PRIVILEGED_FUNCTION */
+        {
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+
+            lIndex = ( int32_t ) xQueue;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    vQueueAddToRegistry( xInternalQueueHandle, pcName );
+                }
+            }
+        }
+
+    #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+    #if configQUEUE_REGISTRY_SIZE > 0
+
+        void MPU_vQueueUnregisterQueueImpl( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+        void MPU_vQueueUnregisterQueueImpl( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+        {
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+
+            lIndex = ( int32_t ) xQueue;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    vQueueUnregisterQueue( xInternalQueueHandle );
+                }
+            }
+        }
+
+    #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+    #if configQUEUE_REGISTRY_SIZE > 0
+
+        const char * MPU_pcQueueGetNameImpl( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+        const char * MPU_pcQueueGetNameImpl( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+        {
+            const char * pcReturn = NULL;
+            QueueHandle_t xInternalQueueHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xQueue;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    pcReturn = pcQueueGetName( xInternalQueueHandle );
+                }
+            }
+
+            return pcReturn;
+        }
+
+    #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Queue APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs.
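+ * The external handle returned to the application is the kernel object
+ * pool index ( CONVERT_TO_EXTERNAL_INDEX ) cast to the handle type, not a
+ * pointer to the object itself, so the create wrappers below reserve a pool
+ * slot, store the real handle in it, and release the slot again if creation
+ * fails. Illustrative use from the application side (hypothetical values):
+ *
+ *     QueueHandle_t xQueue = xQueueCreate( 5, sizeof( uint32_t ) );
+ *     // xQueue is an opaque index; the wrappers translate it back to the
+ *     // internal handle on every subsequent call, e.g. xQueueSend().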
*/ +/*-----------------------------------------------------------*/ + + void MPU_vQueueDelete( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + vQueueDelete( xInternalQueueHandle ); + MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + } + } + } +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueCreateMutex( ucQueueType ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, + StaticQueue_t * pxStaticQueue ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueCreateMutexStatic( ucQueueType, pxStaticQueue ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t MPU_xQueueCreateCountingSemaphore( UBaseType_t uxCountValue, + UBaseType_t uxInitialCount ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueCreateCountingSemaphore( uxCountValue, uxInitialCount ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t 
MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, + const UBaseType_t uxInitialCount, + StaticQueue_t * pxStaticQueue ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + QueueHandle_t MPU_xQueueGenericCreate( UBaseType_t uxQueueLength, + UBaseType_t uxItemSize, + uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + uint8_t * pucQueueStorage, + StaticQueue_t * pxStaticQueue, + const uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, + BaseType_t xNewQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + + lIndex = ( uint32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGenericReset( xInternalQueueHandle, xNewQueue ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueSetHandle_t 
MPU_xQueueCreateSet( UBaseType_t uxEventQueueLength ) /* PRIVILEGED_FUNCTION */ + { + QueueSetHandle_t xInternalQueueSetHandle = NULL; + QueueSetHandle_t xExternalQueueSetHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueSetHandle = xQueueCreateSet( uxEventQueueLength ); + + if( xInternalQueueSetHandle != NULL ) + { + MPU_StoreQueueSetHandleAtIndex( lIndex, xInternalQueueSetHandle ); + xExternalQueueSetHandle = ( QueueSetHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueSetHandle; + } + + #endif /* if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + QueueSetMemberHandle_t xInternalQueueSetMemberHandle = NULL; + QueueSetHandle_t xInternalQueueSetHandle; + int32_t lIndexQueueSet, lIndexQueueSetMember; + + lIndexQueueSet = ( int32_t ) xQueueSet; + lIndexQueueSetMember = ( int32_t ) xQueueOrSemaphore; + + if( ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) && + ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSetMember ) != pdFALSE ) ) + { + xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) ); + xInternalQueueSetMemberHandle = MPU_GetQueueSetMemberHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) ); + + if( ( xInternalQueueSetHandle != NULL ) && ( xInternalQueueSetMemberHandle != NULL ) ) + { + xReturn = xQueueRemoveFromSet( xInternalQueueSetMemberHandle, xInternalQueueSetHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xQueueGenericGetStaticBuffers( QueueHandle_t xQueue, + uint8_t ** ppucQueueStorage, + StaticQueue_t ** ppxStaticQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFALSE; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGenericGetStaticBuffers( xInternalQueueHandle, ppucQueueStorage, ppxStaticQueue ); + } + } + + return xReturn; + } + + #endif /*if ( configSUPPORT_STATIC_ALLOCATION == 1 )*/ +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueGenericSendFromISR( QueueHandle_t xQueue, + const void * const pvItemToQueue, + BaseType_t * const pxHigherPriorityTaskWoken, + const BaseType_t xCopyPosition ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGenericSendFromISR( xInternalQueueHandle, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + 
BaseType_t MPU_xQueueGiveFromISR( QueueHandle_t xQueue, + BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGiveFromISR( xInternalQueueHandle, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueuePeekFromISR( QueueHandle_t xQueue, + void * const pvBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueuePeekFromISR( xInternalQueueHandle, pvBuffer ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueReceiveFromISR( QueueHandle_t xQueue, + void * const pvBuffer, + BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueReceiveFromISR( xInternalQueueHandle, pvBuffer, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueIsQueueEmptyFromISR( xInternalQueueHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueIsQueueFullFromISR( xInternalQueueHandle ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = 0; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + uxReturn = 
uxQueueMessagesWaitingFromISR( xInternalQueueHandle ); + } + } + + return uxReturn; + } + +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t MPU_xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xMutexHolderTaskInternalHandle = NULL; + TaskHandle_t xMutexHolderTaskExternalHandle = NULL; + int32_t lIndex, lMutexHolderTaskIndex; + QueueHandle_t xInternalSemaphoreHandle = NULL; + + lIndex = ( int32_t ) xSemaphore; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalSemaphoreHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalSemaphoreHandle != NULL ) + { + xMutexHolderTaskInternalHandle = xQueueGetMutexHolder( xInternalSemaphoreHandle ); + + if( xMutexHolderTaskInternalHandle != NULL ) + { + lMutexHolderTaskIndex = MPU_GetIndexForTaskHandle( xMutexHolderTaskInternalHandle ); + + if( lMutexHolderTaskIndex != -1 ) + { + xMutexHolderTaskExternalHandle = ( TaskHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lMutexHolderTaskIndex ) ); + } + } + } + } + + return xMutexHolderTaskExternalHandle; + } + + #endif /* #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t MPU_xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */ + { + QueueSetHandle_t xInternalQueueSetHandle = NULL; + QueueSetMemberHandle_t xSelectedMemberInternal = NULL; + QueueSetMemberHandle_t xSelectedMemberExternal = NULL; + int32_t lIndexQueueSet, lIndexSelectedMember; + + lIndexQueueSet = ( int32_t ) xQueueSet; + + if( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) + { + xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) ); + + if( xInternalQueueSetHandle != NULL ) + { + xSelectedMemberInternal = xQueueSelectFromSetFromISR( xInternalQueueSetHandle ); + + if( xSelectedMemberInternal != NULL ) + { + lIndexSelectedMember = MPU_GetIndexForQueueSetMemberHandle( xSelectedMemberInternal ); + + if( lIndexSelectedMember != -1 ) + { + xSelectedMemberExternal = ( QueueSetMemberHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lIndexSelectedMember ) ); + } + } + } + } + + return xSelectedMemberExternal; + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for timers APIs. 
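+ * The timer wrappers follow the same external-index to internal-handle
+ * translation as the queue wrappers above. MPU_xTimerGenericCommandImpl
+ * additionally checks that a non-NULL pxHigherPriorityTaskWoken pointer is
+ * writable by the calling task before it is passed to the kernel.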
*/ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void * MPU_pvTimerGetTimerIDImpl( const TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + void * MPU_pvTimerGetTimerIDImpl( const TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + void * pvReturn = NULL; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + pvReturn = pvTimerGetTimerID( xInternalTimerHandle ); + } + } + + return pvReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void MPU_vTimerSetTimerIDImpl( TimerHandle_t xTimer, + void * pvNewID ) PRIVILEGED_FUNCTION; + + void MPU_vTimerSetTimerIDImpl( TimerHandle_t xTimer, + void * pvNewID ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + vTimerSetTimerID( xInternalTimerHandle, pvNewID ); + } + } + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerIsTimerActive( xInternalTimerHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandleImpl( void ) PRIVILEGED_FUNCTION; + + TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xReturn; + + xReturn = xTimerGetTimerDaemonTaskHandle(); + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerGenericCommandImpl( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTimerGenericCommandImpl( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + BaseType_t xIsHigherPriorityTaskWokenWriteable = pdFALSE; + + if( pxHigherPriorityTaskWoken != NULL ) + { + xIsHigherPriorityTaskWokenWriteable = xPortIsAuthorizedToAccessBuffer( pxHigherPriorityTaskWoken, + sizeof( BaseType_t ), + tskMPU_WRITE_PERMISSION ); + } + + if( ( 
pxHigherPriorityTaskWoken == NULL ) || ( xIsHigherPriorityTaskWokenWriteable == pdTRUE ) ) + { + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGenericCommand( xInternalTimerHandle, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + const char * MPU_pcTimerGetNameImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + const char * MPU_pcTimerGetNameImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + const char * pcReturn = NULL; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + pcReturn = pcTimerGetName( xInternalTimerHandle ); + } + } + + return pcReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void MPU_vTimerSetReloadModeImpl( TimerHandle_t xTimer, + const UBaseType_t uxAutoReload ) PRIVILEGED_FUNCTION; + + void MPU_vTimerSetReloadModeImpl( TimerHandle_t xTimer, + const UBaseType_t uxAutoReload ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + vTimerSetReloadMode( xInternalTimerHandle, uxAutoReload ); + } + } + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerGetReloadModeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTimerGetReloadModeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGetReloadMode( xInternalTimerHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + UBaseType_t MPU_uxTimerGetReloadModeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTimerGetReloadModeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = 0; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + uxReturn = uxTimerGetReloadMode( xInternalTimerHandle ); + } + } + + return uxReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ 
+ + #if ( configUSE_TIMERS == 1 ) + + TickType_t MPU_xTimerGetPeriodImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + TickType_t MPU_xTimerGetPeriodImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + TickType_t xReturn = 0; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGetPeriod( xInternalTimerHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + TickType_t MPU_xTimerGetExpiryTimeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + TickType_t MPU_xTimerGetExpiryTimeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + TickType_t xReturn = 0; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGetExpiryTime( xInternalTimerHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +/* Privileged only wrappers for Timer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) + + TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + TimerHandle_t xExternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalTimerHandle = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, MPU_TimerCallback ); + + if( xInternalTimerHandle != NULL ) + { + MPU_StoreTimerHandleAtIndex( lIndex, xInternalTimerHandle, pxCallbackFunction ); + xExternalTimerHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalTimerHandle; + } + + #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) + + TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t * pxTimerBuffer ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + TimerHandle_t xExternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalTimerHandle = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, MPU_TimerCallback, pxTimerBuffer ); + + if( xInternalTimerHandle != NULL ) + { + MPU_StoreTimerHandleAtIndex( 
lIndex, xInternalTimerHandle, pxCallbackFunction ); + xExternalTimerHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalTimerHandle; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerGetStaticBuffer( TimerHandle_t xTimer, + StaticTimer_t ** ppxTimerBuffer ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + BaseType_t xReturn = pdFALSE; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGetStaticBuffer( xInternalTimerHandle, ppxTimerBuffer ); + } + } + + return xReturn; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for event group APIs. */ +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupWaitBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + EventBits_t MPU_xEventGroupWaitBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupWaitBits( xInternalEventGroupHandle, uxBitsToWaitFor, xClearOnExit, xWaitForAllBits, xTicksToWait ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupClearBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; + + EventBits_t MPU_xEventGroupClearBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupClearBits( xInternalEventGroupHandle, uxBitsToClear ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION; + + EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + 
int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                xReturn = xEventGroupSetBits( xInternalEventGroupHandle, uxBitsToSet );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    EventBits_t MPU_xEventGroupSyncImpl( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToSet,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    EventBits_t MPU_xEventGroupSyncImpl( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToSet,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                xReturn = xEventGroupSync( xInternalEventGroupHandle, uxBitsToSet, uxBitsToWaitFor, xTicksToWait );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TRACE_FACILITY == 1 )
+
+        UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ) PRIVILEGED_FUNCTION;
+
+        UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ) /* PRIVILEGED_FUNCTION */
+        {
+            UBaseType_t xReturn = 0;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    xReturn = uxEventGroupGetNumber( xInternalEventGroupHandle );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TRACE_FACILITY == 1 )
+
+        void MPU_vEventGroupSetNumberImpl( void * xEventGroup,
+                                           UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION;
+
+        void MPU_vEventGroupSetNumberImpl( void * xEventGroup,
+                                           UBaseType_t uxEventGroupNumber ) /* PRIVILEGED_FUNCTION */
+        {
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    vEventGroupSetNumber( xInternalEventGroupHandle, uxEventGroupNumber );
+                }
+            }
+        }
+
+    #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Event Group APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs.
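+ * The create wrappers below reserve a slot in the kernel object pool and
+ * hand the slot index back to the application as the event group handle;
+ * MPU_vEventGroupDelete releases the slot again so that the index can be
+ * reused for a new kernel object.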
*/ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + EventGroupHandle_t MPU_xEventGroupCreate( void ) /* PRIVILEGED_FUNCTION */ + { + EventGroupHandle_t xInternalEventGroupHandle = NULL; + EventGroupHandle_t xExternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalEventGroupHandle = xEventGroupCreate(); + + if( xInternalEventGroupHandle != NULL ) + { + MPU_StoreEventGroupHandleAtIndex( lIndex, xInternalEventGroupHandle ); + xExternalEventGroupHandle = ( EventGroupHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalEventGroupHandle; + } + + #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) /* PRIVILEGED_FUNCTION */ + { + EventGroupHandle_t xInternalEventGroupHandle = NULL; + EventGroupHandle_t xExternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalEventGroupHandle = xEventGroupCreateStatic( pxEventGroupBuffer ); + + if( xInternalEventGroupHandle != NULL ) + { + MPU_StoreEventGroupHandleAtIndex( lIndex, xInternalEventGroupHandle ); + xExternalEventGroupHandle = ( EventGroupHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalEventGroupHandle; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) /* PRIVILEGED_FUNCTION */ + { + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + vEventGroupDelete( xInternalEventGroupHandle ); + MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + } + } + } +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + StaticEventGroup_t ** ppxEventGroupBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupGetStaticBuffer( xInternalEventGroupHandle, ppxEventGroupBuffer ); + } + } + + return xReturn; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) + + BaseType_t MPU_xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + EventGroupHandle_t 
xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupClearBitsFromISR( xInternalEventGroupHandle, uxBitsToClear ); + } + } + + return xReturn; + } + + #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) + + BaseType_t MPU_xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupSetBitsFromISR( xInternalEventGroupHandle, uxBitsToSet, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + + #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupGetBitsFromISR( xInternalEventGroupHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for stream buffer APIs. 
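+ * In addition to translating the index-based handle, the send and receive
+ * wrappers below verify that the calling task may read the whole pvTxData
+ * buffer, or write the whole pvRxData buffer, using
+ * xPortIsAuthorizedToAccessBuffer before the underlying stream buffer API
+ * is called.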
*/ +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + BaseType_t xIsTxDataBufferReadable = pdFALSE; + + xIsTxDataBufferReadable = xPortIsAuthorizedToAccessBuffer( pvTxData, + xDataLengthBytes, + tskMPU_READ_PERMISSION ); + + if( xIsTxDataBufferReadable == pdTRUE ) + { + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSend( xInternalStreamBufferHandle, pvTxData, xDataLengthBytes, xTicksToWait ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferReceiveImpl( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferReceiveImpl( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + BaseType_t xIsRxDataBufferWriteable = pdFALSE; + + xIsRxDataBufferWriteable = xPortIsAuthorizedToAccessBuffer( pvRxData, + xBufferLengthBytes, + tskMPU_WRITE_PERMISSION ); + + if( xIsRxDataBufferWriteable == pdTRUE ) + { + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferReceive( xInternalStreamBufferHandle, pvRxData, xBufferLengthBytes, xTicksToWait ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferIsFullImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xStreamBufferIsFullImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferIsFull( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferIsEmptyImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xStreamBufferIsEmptyImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = 
MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferIsEmpty( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferSpacesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferSpacesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSpacesAvailable( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferBytesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferBytesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferBytesAvailable( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferSetTriggerLevelImpl( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xStreamBufferSetTriggerLevelImpl( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSetTriggerLevel( xInternalStreamBufferHandle, xTriggerLevel ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferNextMessageLengthBytesImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferNextMessageLengthBytesImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferNextMessageLengthBytes( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + +/* Privileged only wrappers for Stream Buffer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
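Every wrapper in this block performs the same translation: the handle the application holds is not a pointer at all but a small index into a pool of real kernel handles kept inside mpu_wrappers.c, so a forged or corrupted handle can never reach a kernel object directly. A minimal sketch of the index scheme that the IS_EXTERNAL_INDEX_VALID / CONVERT_TO_*_INDEX macros used above implement (the SKETCH_ names and the pool size are illustrative assumptions, not the definitions used by the file):

    /* Illustration only: assumed pool size, and an offset of one so that a
     * valid external handle is never NULL. */
    #define SKETCH_POOL_SIZE                            ( 10 )
    #define SKETCH_INDEX_OFFSET                         ( 1 )

    #define SKETCH_IS_EXTERNAL_INDEX_VALID( lIdx )      ( ( ( lIdx ) >= SKETCH_INDEX_OFFSET ) && \
                                                          ( ( lIdx ) < ( SKETCH_POOL_SIZE + SKETCH_INDEX_OFFSET ) ) )
    #define SKETCH_CONVERT_TO_INTERNAL_INDEX( lIdx )    ( ( lIdx ) - SKETCH_INDEX_OFFSET )
    #define SKETCH_CONVERT_TO_EXTERNAL_INDEX( lIdx )    ( ( lIdx ) + SKETCH_INDEX_OFFSET )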
*/ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* PRIVILEGED_FUNCTION */ + { + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + StreamBufferHandle_t xExternalStreamBufferHandle = NULL; + int32_t lIndex; + + /** + * Stream buffer application level callback functionality is disabled for MPU + * enabled ports. + */ + configASSERT( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ); + + if( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ) + { + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalStreamBufferHandle = xStreamBufferGenericCreate( xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + NULL, + NULL ); + + if( xInternalStreamBufferHandle != NULL ) + { + MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle ); + xExternalStreamBufferHandle = ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + } + else + { + traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); + xExternalStreamBufferHandle = NULL; + } + + return xExternalStreamBufferHandle; + } + + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* PRIVILEGED_FUNCTION */ + { + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + StreamBufferHandle_t xExternalStreamBufferHandle = NULL; + int32_t lIndex; + + /** + * Stream buffer application level callback functionality is disabled for MPU + * enabled ports. 
+         */
+            configASSERT( ( pxSendCompletedCallback == NULL ) &&
+                          ( pxReceiveCompletedCallback == NULL ) );
+
+            if( ( pxSendCompletedCallback == NULL ) &&
+                ( pxReceiveCompletedCallback == NULL ) )
+            {
+                lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+                if( lIndex != -1 )
+                {
+                    xInternalStreamBufferHandle = xStreamBufferGenericCreateStatic( xBufferSizeBytes,
+                                                                                    xTriggerLevelBytes,
+                                                                                    xIsMessageBuffer,
+                                                                                    pucStreamBufferStorageArea,
+                                                                                    pxStaticStreamBuffer,
+                                                                                    NULL,
+                                                                                    NULL );
+
+                    if( xInternalStreamBufferHandle != NULL )
+                    {
+                        MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle );
+                        xExternalStreamBufferHandle = ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                    }
+                    else
+                    {
+                        MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                    }
+                }
+            }
+            else
+            {
+                traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xExternalStreamBufferHandle, xIsMessageBuffer );
+                xExternalStreamBufferHandle = NULL;
+            }
+
+            return xExternalStreamBufferHandle;
+        }
+
+    #endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+    void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                vStreamBufferDelete( xInternalStreamBufferHandle );
+            }
+
+            MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+        }
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFALSE;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferReset( xInternalStreamBufferHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        BaseType_t MPU_xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffers,
+                                                      uint8_t ** ppucStreamBufferStorageArea,
+                                                      StaticStreamBuffer_t ** ppxStaticStreamBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xStreamBuffers;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalStreamBufferHandle != NULL )
+                {
+                    xReturn = xStreamBufferGetStaticBuffers( xInternalStreamBufferHandle, ppucStreamBufferStorageArea, ppxStaticStreamBuffer );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
+                                         const void * pvTxData,
+                                         size_t xDataLengthBytes,
+                                         BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+    {
+        size_t xReturn = 0;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = (
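The create and delete wrappers above also manage the pool slots themselves: MPU_GetFreeIndexInKernelObjectPool() reserves a slot, MPU_StoreStreamBufferHandleAtIndex() publishes the freshly created internal handle into it, and the caller is handed CONVERT_TO_EXTERNAL_INDEX( lIndex ) as its opaque handle. If the underlying create fails, or when MPU_vStreamBufferDelete() runs, MPU_SetIndexFreeInKernelObjectPool() returns the slot to the pool so it can be reused.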
int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSendFromISR( xInternalStreamBufferHandle, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferReceiveFromISR( xInternalStreamBufferHandle, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, + BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSendCompletedFromISR( xInternalStreamBufferHandle, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, + BaseType_t * pxHigherPriorityTaskWoken ) /*PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferReceiveCompletedFromISR( xInternalStreamBufferHandle, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + +/* Functions that the application writer wants to execute in privileged mode + * can be defined in application_defined_privileged_functions.h. */ + + #if configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS == 1 + #include "application_defined_privileged_functions.h" + #endif +/*-----------------------------------------------------------*/ + +#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_AT91SAM7S/port.c b/portable/GCC/ARM7_AT91SAM7S/port.c index 335f9852626..a9d78bb138b 100644 --- a/portable/GCC/ARM7_AT91SAM7S/port.c +++ b/portable/GCC/ARM7_AT91SAM7S/port.c @@ -204,7 +204,7 @@ AT91PS_PITC pxPIT = AT91C_BASE_PITC; /* Configure the PIT period. 
*/ pxPIT->PITC_PIMR = portPIT_ENABLE | portPIT_INT_ENABLE | portPIT_COUNTER_VALUE; - /* Enable the interrupt. Global interrupts are disables at this point so + /* Enable the interrupt. Global interrupts are disabled at this point so this is safe. */ AT91C_BASE_AIC->AIC_IECR = 0x1 << AT91C_ID_SYS; } diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index dc7f54578e2..14375bf9581 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -79,7 +79,6 @@ typedef unsigned long UBaseType_t; #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..a1e5ce0828f --- /dev/null +++ b/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2419 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
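The new mpu_wrappers_v2_asm.c below repeats one small trampoline per API: a naked function that preserves r0/r1, reads the CONTROL register to see whether the caller is already privileged, and then either tail-branches straight to the corresponding MPU_*Impl function or reaches it through an SVC pair that raises and later drops privilege. Roughly the C equivalent of each stub is sketched here; MPU_xExampleCall, MPU_xExampleCallImpl and the SKETCH_ macros are hypothetical stand-ins, and __get_CONTROL() is the CMSIS intrinsic for the "mrs r0, control" read. This is illustration only, since the real code has to stay in assembly: a naked function has no prologue, and the argument registers must reach the Impl untouched.

    BaseType_t MPU_xExampleCall( BaseType_t xArg )      /* hypothetical wrapper */
    {
        BaseType_t xResult;

        if( ( __get_CONTROL() & 0x1UL ) == 0UL )        /* nPRIV clear: caller already privileged */
        {
            xResult = MPU_xExampleCallImpl( xArg );     /* plain call, no SVC needed */
        }
        else
        {
            SKETCH_RAISE_PRIVILEGE();                   /* stands in for "svc portSVC_SYSTEM_CALL_ENTER" */
            xResult = MPU_xExampleCallImpl( xArg );
            SKETCH_LOWER_PRIVILEGE();                   /* stands in for "svc portSVC_SYSTEM_CALL_EXIT" */
        }

        return xResult;
    }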
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, 
control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) 
__attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr 
\n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t 
MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
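One detail that varies between the stubs: wrappers whose arguments all fit in r0-r3 enter the kernel with portSVC_SYSTEM_CALL_ENTER, while the five-argument notification wrappers here (MPU_xTaskGenericNotify above and MPU_xTaskGenericNotifyWait below) use portSVC_SYSTEM_CALL_ENTER_1, presumably so the SVC handler knows that one additional argument was passed on the caller's stack and has to be carried across to the privileged system-call stack.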
configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( 
TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, 
#1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : 
"memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " 
.syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0, r1} 
\n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
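
Every naked MPU_* wrapper in this file implements the same privilege dispatch; only the *Impl symbol it forwards to changes. The following is a rough C-level sketch of that dispatch (illustrative only, not part of the patch: the real code must stay in assembly so that r0-r3 reach the Impl function unmodified, and prvRunningUnprivileged() / portRAISE_SVC() below are hypothetical stand-ins for the "mrs r0, control / tst" test and the two svc instructions; portSVC_SYSTEM_CALL_ENTER and portSVC_SYSTEM_CALL_EXIT are assumed to come from the port headers).

#include "FreeRTOS.h"
#include "queue.h"

/* Hypothetical stand-ins used only by this sketch. */
extern BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t xQueue,
                                         void * const pvBuffer,
                                         TickType_t xTicksToWait );

#define portRAISE_SVC( n )    __asm volatile ( "svc %0" : : "i" ( n ) : "memory" )

static BaseType_t prvRunningUnprivileged( void )
{
    uint32_t ulControl;

    /* Equivalent of "mrs r0, control / movs r1, #1 / tst r0, r1" - the nPRIV
     * bit of CONTROL is set when the caller is unprivileged. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    return ( ( ulControl & 1UL ) != 0UL ) ? pdTRUE : pdFALSE;
}

BaseType_t MPU_xQueueReceive_Sketch( QueueHandle_t xQueue,
                                     void * const pvBuffer,
                                     TickType_t xTicksToWait )
{
    BaseType_t xReturn;

    if( prvRunningUnprivileged() == pdFALSE )
    {
        /* Privileged caller: go straight to the implementation (the assembly
         * tail-calls it with "b MPU_xQueueReceiveImpl"). */
        xReturn = MPU_xQueueReceiveImpl( xQueue, pvBuffer, xTicksToWait );
    }
    else
    {
        /* Unprivileged caller: raise an SVC so vSystemCallEnter() can switch
         * to the per-task system call stack and raise privilege, run the
         * implementation, then raise a second SVC so vSystemCallExit() can
         * restore the task stack and drop privilege again. */
        portRAISE_SVC( portSVC_SYSTEM_CALL_ENTER );
        xReturn = MPU_xQueueReceiveImpl( xQueue, pvBuffer, xTicksToWait );
        portRAISE_SVC( portSVC_SYSTEM_CALL_EXIT );
    }

    return xReturn;
}

Wrappers whose fifth parameter is passed on the stack (for example MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits) use portSVC_SYSTEM_CALL_ENTER_1 instead, so that vSystemCallEnter_1() also copies the stacked parameter across to the system call stack; MPU_xTimerGenericCommand additionally checks IPSR and takes the privileged path when invoked from an interrupt.
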
+/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t 
MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl 
MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM23/non_secure/port.c b/portable/GCC/ARM_CM23/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM23/non_secure/port.c +++ b/portable/GCC/ARM_CM23/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. 
+ */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
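 * As a worked example of the priority checks above (illustrative numbers, not
 * taken from any particular device): a core that implements 3 priority bits
 * reads back 0xE0 after 0xFF is written to an interrupt priority register, so
 * ucMaxPriorityValue is 0xE0 and ulImplementedPrioBits is 3.  A
 * configMAX_SYSCALL_INTERRUPT_PRIORITY of 0xA0 then satisfies the new assert
 * because ( 0xA0 & ~0xE0 ) == ( 0xA0 & 0x1F ) == 0x00, whereas a value such as
 * 0x05 fails it: every set bit falls in the unimplemented field and the
 * hardware would silently treat the value as priority 0.  The group value
 * computed from ulImplementedPrioBits is shifted into its AIRCR field below.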
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM23/non_secure/portasm.c b/portable/GCC/ARM_CM23/non_secure/portasm.c index 44f159af1fa..64a24f527ff 100644 --- a/portable/GCC/ARM_CM23/non_secure/portasm.c +++ b/portable/GCC/ARM_CM23/non_secure/portasm.c @@ -44,6 +44,109 @@ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0. #endif +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. 
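 * The unrolled sequence of stores that follows is equivalent to the loop
 * sketched below (pointer names are illustrative, xMPUSettings stands for the
 * task's xMPU_SETTINGS): the four per-task regions occupy MPU slots 4 to 7,
 * and each one is programmed by selecting the slot in RNR and then writing the
 * RBAR/RLAR pair saved in the TCB.
 *
 *     volatile uint32_t * const pulRNR  = ( volatile uint32_t * ) 0xe000ed98;
 *     volatile uint32_t * const pulRBAR = ( volatile uint32_t * ) 0xe000ed9c;
 *     volatile uint32_t * const pulRLAR = ( volatile uint32_t * ) 0xe000eda0;
 *     uint32_t ulRegion;
 *
 *     for( ulRegion = 0; ulRegion < 4UL; ulRegion++ )
 *     {
 *         *pulRNR  = 4UL + ulRegion;
 *         *pulRBAR = xMPUSettings->xRegionsSettings[ ulRegion ].ulRBAR;
 *         *pulRLAR = xMPUSettings->xRegionsSettings[ ulRegion ].ulRLAR;
 *     }
 *
 * The MPU is kept disabled while the registers are written and is only
 * switched back on, followed by a DSB, once all four regions are in place.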
*/ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " subs r2, #20 \n" + " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + " subs r2, #20 \n" + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " mov lr, r6 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r2, #48 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r2, #16 \n" + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. 
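 * At this point the first TCB member no longer holds a conventional top of
 * stack but a cursor into the ulContext array kept in xMPU_SETTINGS.  The five
 * words that restore_special_regs_first_task consumed immediately below that
 * cursor can be pictured as the following layout (a sketch only, not a type
 * used by the kernel):
 *
 *     typedef struct SavedSpecialRegisters
 *     {
 *         uint32_t ulSecureContext;   (xSecureContext, portNO_SECURE_CONTEXT if none)
 *         uint32_t ulPSP;             (task stack pointer as stacked by hardware)
 *         uint32_t ulPSPLIM;          (stack limit for the task)
 *         uint32_t ulCONTROL;         (privilege and stack selection)
 *         uint32_t ulExcReturn;       (EXC_RETURN used to leave the handler)
 *     } SavedSpecialRegisters_t;
 *
 * which is why the code moves the cursor by 20 bytes, five words, around the
 * ldmia before the handler finally returns through EXC_RETURN.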
*/ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -54,83 +157,24 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " movs r5, #4 \n"/* r5 = 4. */ - " str r5, [r2] \n"/* Program RNR = 4. */ - " ldmia r3!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r5, #5 \n"/* r5 = 5. */ - " str r5, [r2] \n"/* Program RNR = 5. */ - " ldmia r3!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r5, #6 \n"/* r5 = 6. */ - " str r5, [r2] \n"/* Program RNR = 6. */ - " ldmia r3!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r5, #7 \n"/* r5 = 7. */ - " str r5, [r2] \n"/* Program RNR = 7. */ - " ldmia r3!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. 
*/ - " isb \n" - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " bx r3 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -237,6 +281,167 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r3} \n" /* LR is now in r3. */ + " mov lr, r3 \n" /* Restore LR. */ + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " stmia r2!, {r4-r7} \n" /* Store r4-r7. */ + " mov r4, r8 \n" /* r4 = r8. */ + " mov r5, r9 \n" /* r5 = r9. 
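 * The detour through r4-r7 is needed because the Cortex-M23 implements the
 * ARMv8-M baseline instruction set, whose STMIA encoding can only store the
 * low registers r0-r7.  The high registers r8-r11 are therefore copied into
 * r4-r7 four at a time and written out as a second block, both here and in the
 * matching restore code.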
*/ + " mov r6, r10 \n" /* r6 = r10. */ + " mov r7, r11 \n" /* r7 = r11. */ + " stmia r2!, {r4-r7} \n" /* Store r8-r11. */ + " ldmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */ + " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */ + " ldmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */ + " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " mov r6, lr \n" /* r6 = LR. */ + " stmia r2!, {r0, r3-r6} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. 
&( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " subs r2, #20 \n" + " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + " subs r2, #20 \n" + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " mov lr, r6 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r4} \n" /* LR is now in r4. */ + " mov lr, r4 \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r2, #48 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r2, #16 \n" + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,52 +465,26 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. 
*/ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stmia r2!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #48 \n"/* r2 = r2 - 48. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */ + " mov r4, r8 \n"/* r4 = r8. */ + " mov r5, r9 \n"/* r5 = r9. */ + " mov r6, r10 \n"/* r6 = r10. */ + " mov r7, r11 \n"/* r7 = r11. */ + " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ " \n" " select_next_task: \n" " cpsid i \n" @@ -316,85 +495,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. 
r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r4, xRNRConst \n"/* r4 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r5, #4 \n"/* r5 = 4. */ - " str r5, [r4] \n"/* Program RNR = 4. */ - " ldmia r1!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r5, #5 \n"/* r5 = 5. */ - " str r5, [r4] \n"/* Program RNR = 5. */ - " ldmia r1!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r5, #6 \n"/* r5 = 6. */ - " str r5, [r4] \n"/* Program RNR = 6. */ - " ldmia r1!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r5, #7 \n"/* r5 = 7. */ - " str r5, [r4] \n"/* Program RNR = 7. */ - " ldmia r1!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). 
*/ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " adds r2, r2, #16 \n"/* Move to the high registers. 
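 * Throughout this handler the "lsls rX, rY, #25" followed by "bmi" or "bpl"
 * sequences are a compact way of testing bit 6 of EXC_RETURN: shifting left by
 * 25 moves bit 6 into bit 31, the sign bit, so the conditional branch acts on
 * it directly.  The equivalent C test would be (sketch only, ulExcReturn being
 * whatever value LR held on exception entry):
 *
 *     xFrameOnSecureStack = ( ( ulExcReturn & ( 1UL << 6UL ) ) != 0UL ) ? pdTRUE : pdFALSE;
 *
 * with pdTRUE meaning the secure stack holds the exception frame.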
*/ @@ -411,16 +527,62 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "movs r0, #4 \n" + "mov r1, lr \n" + "tst r0, r1 \n" + "beq stack_on_msp \n" + "stack_on_psp: \n" + " mrs r0, psp \n" + " b route_svc \n" + "stack_on_msp: \n" + " mrs r0, msp \n" + " b route_svc \n" + " \n" + "route_svc: \n" + " ldr r2, [r0, #24] \n" + " subs r2, #2 \n" + " ldrb r3, [r2, #0] \n" + " cmp r3, %0 \n" + " beq system_call_enter \n" + " cmp r3, %1 \n" + " beq system_call_enter_1 \n" + " cmp r3, %2 \n" + " beq system_call_exit \n" + " b vPortSVCHandler_C \n" + " \n" + "system_call_enter: \n" + " b vSystemCallEnter \n" + "system_call_enter_1: \n" + " b vSystemCallEnter_1 \n" + "system_call_exit: \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "r3", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -443,6 +605,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index 746f734b8ac..5fd94c1c371 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M23" #define portHAS_BASEPRI 0 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. 
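 * For each region the RBAR word carries the region base address together with
 * its shareability and access permission fields, while the RLAR word carries
 * the inclusive limit address, the attribute index and the region enable bit.
 * That is why xPortIsAuthorizedToAccessBuffer() above can recover a region's
 * address range with portEXTRACT_FIRST_ADDRESS_FROM_RBAR() and
 * portEXTRACT_LAST_ADDRESS_FROM_RLAR() and test the enable bit against
 * portMPU_RLAR_REGION_ENABLE.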
*/ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. 
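 * Each MAX_CONTEXT_SIZE above is simply the sum of the groups in its diagram;
 * for example, with no FPU/MVE but with TrustZone: 8 callee-saved registers +
 * 8 hardware-stacked registers + 5 special words (xSecureContext, PSP, PSPLIM,
 * CONTROL, EXC_RETURN) + 1 further slot = 22 words, i.e. 88 bytes of ulContext
 * per task.  A configuration could pin that expectation with a preprocessor
 * check along these lines, shown purely as a sketch:
 *
 *     #if ( ( configENABLE_FPU == 0 ) && ( configENABLE_MVE == 0 ) && ( configENABLE_TRUSTZONE == 1 ) )
 *         #if ( MAX_CONTEXT_SIZE != 22 )
 *             #error Unexpected MAX_CONTEXT_SIZE for this configuration.
 *         #endif
 *     #endif
 *
 * The two flag bits defined next live in ulTaskFlags, separate from this
 * context area.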
*/ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..a1e5ce0828f --- /dev/null +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2419 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
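 * Every wrapper in this file follows the same shape: read CONTROL, test the
 * nPRIV bit, branch straight to the corresponding MPU_...Impl function when
 * the caller is already privileged, and otherwise raise
 * portSVC_SYSTEM_CALL_ENTER (or portSVC_SYSTEM_CALL_ENTER_1 for calls with a
 * fifth parameter), call the Impl function from the system call context, and
 * raise portSVC_SYSTEM_CALL_EXIT on the way back out.  The privilege test is
 * the assembly form of the following C fragment (sketch only):
 *
 *     __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
 *     xRunningPrivileged = ( ( ulControl & 1UL ) == 0UL ) ? pdTRUE : pdFALSE;
 *
 * which mirrors what xPortIsTaskPrivileged() reports from the task's
 * ulTaskFlags.  The kernel headers the wrappers depend on are included next.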
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, 
control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) 
__attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr 
\n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t 
MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( 
TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, 
#1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : 
"memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " 
.syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0, r1} 
\n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t 
MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl 
MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. 
+ */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
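As an illustration (not part of the port sources), the following host-side sketch models the 8-word basic exception frame that the hardware stacks on SVC entry, assuming the standard ARMv8-M stacking order; this is the frame that the system call enter and exit helpers declared here index using the portOFFSET_TO_LR, portOFFSET_TO_PC and portOFFSET_TO_PSR constants defined earlier in this patch. The struct and file are hypothetical and exist only to make the word offsets visible:

    // Host-side model only - word offsets into the hardware stacked frame.
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct BasicExceptionFrame
    {
        uint32_t ulR0, ulR1, ulR2, ulR3, ulR12;
        uint32_t ulLR;    // Word offset 5 - portOFFSET_TO_LR.
        uint32_t ulPC;    // Word offset 6 - portOFFSET_TO_PC.
        uint32_t ulXPSR;  // Word offset 7 - portOFFSET_TO_PSR.
    } BasicExceptionFrame_t;

    int main( void )
    {
        assert( offsetof( BasicExceptionFrame_t, ulLR ) / sizeof( uint32_t ) == 5 );
        assert( offsetof( BasicExceptionFrame_t, ulPC ) / sizeof( uint32_t ) == 6 );
        assert( offsetof( BasicExceptionFrame_t, ulXPSR ) / sizeof( uint32_t ) == 7 );
        return 0;
    }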
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
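A short, host-compilable illustration (an assumed example, not part of the port) of why vPortSVCHandler_C() above recovers the SVC number from the byte at the stacked PC minus two: Thumb encodes svc #n as the 16-bit value 0xDF00 | n stored little-endian, and the stacked PC holds the address of the instruction that follows the SVC.

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        // Two Thumb instructions as they appear in memory: svc #4 then nop.
        uint8_t ucCode[ 4 ] = { 0x04, 0xDF, 0x00, 0xBF };

        // The hardware stacks the address of the instruction after the SVC.
        uint32_t ulStackedPC = 2;

        // Same arithmetic as ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ].
        uint8_t ucSVCNumber = ucCode[ ulStackedPC - 2 ];

        printf( "SVC number = %u\n", ( unsigned ) ucSVCNumber );   // Prints 4.
        return 0;
    }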
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
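To make the copy loop that follows easier to read, here is a small sketch (illustrative, with assumed names mirroring the port constants) of how the frame size is chosen from EXC_RETURN in the configENABLE_FPU / configENABLE_MVE block above; on Cortex-M23 the core has no FPU, so the basic 8-word frame is always used in this port.

    #include <stdint.h>

    // Mirrors portEXC_RETURN_STACK_FRAME_TYPE_MASK (bit 4 of EXC_RETURN).
    #define EXC_RETURN_FTYPE_MASK    ( 1UL << 4UL )

    static uint32_t ulStackFrameSizeInWords( uint32_t ulExcReturn )
    {
        // Bit 4 clear : extended frame - r0-r3, r12, LR, PC, xPSR,
        //               s0-s15, FPSCR and a reserved word = 26 words.
        // Bit 4 set   : basic frame - r0-r3, r12, LR, PC, xPSR = 8 words.
        return ( ( ulExcReturn & EXC_RETURN_FTYPE_MASK ) == 0UL ) ? 26UL : 8UL;
    }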
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
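A brief aside on the padding flag handled just above (an illustrative sketch with assumed names): when the task's stack pointer was only 4-byte aligned at exception entry, the hardware inserts one aligner word and records that in bit 9 of the stacked xPSR. vSystemCallEnter() latches that bit into ulTaskFlags so vSystemCallExit() can reproduce it on the way out, and in vSystemCallEnter_1() below the same bit decides whether the fifth parameter sits one word higher on the task stack.

    #include <stdbool.h>
    #include <stdint.h>

    // Mirrors portPSR_STACK_PADDING_MASK (bit 9 of the stacked xPSR).
    #define PSR_STACK_PADDING_MASK    ( 1UL << 9UL )

    static bool bHardwareAddedStackPadding( uint32_t ulStackedXPSR )
    {
        // True when the hardware pushed an extra aligner word before the
        // exception frame to keep the stack pointer 8-byte aligned.
        return ( ulStackedXPSR & PSR_STACK_PADDING_MASK ) != 0UL;
    }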
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
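Returning to the configASSERT() added to xPortStartScheduler() above, a worked, host-compilable example (a device with three implemented priority bits is assumed) of what it rejects: writing 0xFF to a priority register on such a device reads back as 0xE0, so configMAX_SYSCALL_INTERRUPT_PRIORITY must not set any of the low five bits, which the hardware would silently ignore.

    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        uint8_t ucMaxPriorityValue = 0xE0;                               // Read back on a 3-priority-bit device (assumed).
        uint8_t ucUnimplementedBits = ( uint8_t ) ~ucMaxPriorityValue;   // 0x1F.

        assert( ( 0xA0 & ucUnimplementedBits ) == 0U );   // 0xA0 uses only implemented bits - accepted.
        assert( ( 0xA1 & ucUnimplementedBits ) != 0U );   // 0xA1 sets an unimplemented bit - the port assert would fire.
        return 0;
    }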
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c index 7fb7b5ade5d..b11b6e97c5e 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c @@ -44,6 +44,106 @@ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0. #endif +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. 
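The xPortIsAuthorizedToAccessBuffer() routine added above is the building block used to validate user-supplied pointers before the kernel dereferences them. A hypothetical usage sketch follows; the function name and parameters are invented for illustration and it assumes the FreeRTOS headers are included.

    // Hypothetical caller - not part of this patch.
    static void prvHandleUserBuffer( const void * pvUserBuffer,
                                     uint32_t ulLength )
    {
        if( xPortIsAuthorizedToAccessBuffer( pvUserBuffer,
                                             ulLength,
                                             tskMPU_READ_PERMISSION ) == pdTRUE )
        {
            // The calling task may read every byte of the buffer: it is
            // either privileged, or the whole buffer lies inside one of
            // its enabled MPU regions with read permission.
        }
        else
        {
            // Reject the request rather than faulting inside the kernel.
        }
    }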
*/ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " subs r1, #16 \n" + " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + " subs r1, #16 \n" + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " mov lr, r5 \n" + " \n" + " restore_general_regs_first_task: \n" + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r1, #48 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r1, #16 \n" + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. 
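For orientation (a descriptive note, not part of the port), the restore sequence above walks backwards through the per-task ulContext[] array that pxPortInitialiseStack(), further down in this patch, fills in the following order for this no-FPU, no-TrustZone configuration:

    ulContext[  0 ..  7 ]   r4-r11
    ulContext[  8 .. 15 ]   r0-r3, r12, LR, PC, xPSR   (copy of the hardware frame)
    ulContext[ 16 .. 19 ]   PSP, PSPLIM, CONTROL, EXC_RETURN

The first word of the TCB initially points one entry past EXC_RETURN, which is why the restore begins by stepping the pointer back 16 bytes to reach the four special registers, and why the location left in r1 at the end (the start of ulContext) is written back to the TCB ready for the next context save.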
*/ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -54,78 +154,21 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r4, #5 \n"/* r4 = 5. */ - " str r4, [r2] \n"/* Program RNR = 5. */ - " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r4, #6 \n"/* r4 = 6. */ - " str r4, [r2] \n"/* Program RNR = 6. */ - " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r4, #7 \n"/* r4 = 7. */ - " str r4, [r2] \n"/* Program RNR = 7. */ - " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. 
*/ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -232,6 +275,136 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + " stmia r1!, {r4-r7} \n" /* Store r4-r7. */ + " mov r4, r8 \n" /* r4 = r8. */ + " mov r5, r9 \n" /* r5 = r9. */ + " mov r6, r10 \n" /* r6 = r10. */ + " mov r7, r11 \n" /* r7 = r11. */ + " stmia r1!, {r4-r7} \n" /* Store r8-r11. */ + " ldmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */ + " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */ + " ldmia r2!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */ + " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r2, psp \n" /* r2 = PSP. */ + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " mov r5, lr \n" /* r5 = LR. */ + " stmia r1!, {r2-r5} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. 
r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " subs r1, #16 \n" + " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + " subs r1, #16 \n" + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " mov lr, r5 \n" + " \n" + " restore_general_regs: \n" + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r1, #48 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r1, #16 \n" + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. 
*/ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -241,30 +414,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " mrs r0, psp \n"/* Read PSP in r0. */ " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r0, r0, #44 \n"/* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r0, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r0!, {r1-r7} \n"/* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #else /* configENABLE_MPU */ - " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */ - " str r0, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ + " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */ + " str r0, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */ + " mov r4, r8 \n"/* r4 = r8. */ + " mov r5, r9 \n"/* r5 = r9. */ + " mov r6, r10 \n"/* r6 = r10. */ + " mov r7, r11 \n"/* r7 = r11. */ + " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ " \n" " cpsid i \n" " bl vTaskSwitchContext \n" @@ -274,88 +433,76 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. 
*/ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r4, #5 \n"/* r4 = 5. */ - " str r4, [r2] \n"/* Program RNR = 5. */ - " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r4, #6 \n"/* r4 = 6. */ - " str r4, [r2] \n"/* Program RNR = 6. */ - " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r4, #7 \n"/* r4 = 7. */ - " str r4, [r2] \n"/* Program RNR = 7. */ - " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " adds r0, r0, #28 \n"/* Move to the high registers. */ - " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ - " mov r8, r4 \n"/* r8 = r4. */ - " mov r9, r5 \n"/* r9 = r5. */ - " mov r10, r6 \n"/* r10 = r6. */ - " mov r11, r7 \n"/* r11 = r7. */ - " msr psp, r0 \n"/* Remember the new top of stack for the task. */ - " subs r0, r0, #44 \n"/* Move to the starting of the saved context. */ - " ldmia r0!, {r1-r7} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - " bx r3 \n" - #else /* configENABLE_MPU */ - " adds r0, r0, #24 \n"/* Move to the high registers. */ - " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ - " mov r8, r4 \n"/* r8 = r4. */ - " mov r9, r5 \n"/* r9 = r5. */ - " mov r10, r6 \n"/* r10 = r6. */ - " mov r11, r7 \n"/* r11 = r7. */ - " msr psp, r0 \n"/* Remember the new top of stack for the task. */ - " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */ - " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - " bx r3 \n" - #endif /* configENABLE_MPU */ + " adds r0, r0, #24 \n"/* Move to the high registers. */ + " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ + " mov r8, r4 \n"/* r8 = r4. */ + " mov r9, r5 \n"/* r9 = r5. */ + " mov r10, r6 \n"/* r10 = r6. */ + " mov r11, r7 \n"/* r11 = r7. */ + " msr psp, r0 \n"/* Remember the new top of stack for the task. */ + " subs r0, r0, #40 \n"/* Move to the starting of the saved context. 
*/ + " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ + " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "movs r0, #4 \n" + "mov r1, lr \n" + "tst r0, r1 \n" + "beq stack_on_msp \n" + "stack_on_psp: \n" + " mrs r0, psp \n" + " b route_svc \n" + "stack_on_msp: \n" + " mrs r0, msp \n" + " b route_svc \n" + " \n" + "route_svc: \n" + " ldr r2, [r0, #24] \n" + " subs r2, #2 \n" + " ldrb r3, [r2, #0] \n" + " cmp r3, %0 \n" + " beq system_call_enter \n" + " cmp r3, %1 \n" + " beq system_call_enter_1 \n" + " cmp r3, %2 \n" + " beq system_call_exit \n" + " b vPortSVCHandler_C \n" + " \n" + "system_call_enter: \n" + " b vSystemCallEnter \n" + "system_call_enter_1: \n" + " b vSystemCallEnter_1 \n" + "system_call_exit: \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "r3", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -378,4 +525,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index 746f734b8ac..5fd94c1c371 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M23" #define portHAS_BASEPRI 0 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. 
- */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. 
*/ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c old mode 100755 new mode 100644 index 9b42eac5a4e..ac8185f5f73 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -292,6 +292,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -328,28 +332,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM3/portmacro.h b/portable/GCC/ARM_CM3/portmacro.h index 36e38f75d21..0ff96b82ae8 100644 --- a/portable/GCC/ARM_CM3/portmacro.h +++ b/portable/GCC/ARM_CM3/portmacro.h @@ -79,7 +79,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
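/* Note: the two five-parameter wrappers above, MPU_xTaskGenericNotify and
 * MPU_xTaskGenericNotifyWait, raise portSVC_SYSTEM_CALL_ENTER_1 rather than
 * portSVC_SYSTEM_CALL_ENTER.  Under the AAPCS only r0-r3 carry arguments, so
 * a fifth parameter travels on the caller's stack; the distinct SVC number
 * lets SVC_Handler route these calls to vSystemCallEnter_1, which presumably
 * also has to carry that stacked argument across to the system call stack
 * before the MPU_*Impl function runs. */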
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
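/* For orientation, a minimal C-level sketch of the dispatch that each naked
 * assembly wrapper in this file performs, using MPU_xQueueReceive (defined
 * just above) as the example.  This is not the actual implementation: the
 * helper prvCallerIsUnprivileged() is hypothetical (the real wrappers read
 * bit 0 of the CONTROL register inline and preserve r0-r3 around the check),
 * and the SVC instructions are shown only symbolically. */
#if 0 /* Illustrative sketch - never compiled. */
    BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
                                  void * const pvBuffer,
                                  TickType_t xTicksToWait )
    {
        BaseType_t xReturn;

        if( prvCallerIsUnprivileged() == pdTRUE )
        {
            /* Unprivileged caller: SVC into the kernel to raise privilege and
             * switch to the task's system call stack, run the implementation,
             * then SVC again to restore the task stack and drop privilege. */
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
            xReturn = MPU_xQueueReceiveImpl( xQueue, pvBuffer, xTicksToWait );
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
        }
        else
        {
            /* Privileged caller (kernel code or a privileged task): no SVC
             * round trip, branch straight to the implementation. */
            xReturn = MPU_xQueueReceiveImpl( xQueue, pvBuffer, xTicksToWait );
        }

        return xReturn;
    }
#endif /* 0 */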
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
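
All of the naked wrappers above share one dispatch pattern: read CONTROL, test the nPRIV bit (bit 0), and either branch straight to the corresponding MPU_<api>Impl function when the caller is already privileged, or go through the SVC-based system call entry and exit (portSVC_SYSTEM_CALL_ENTER and portSVC_SYSTEM_CALL_EXIT, serviced by vSystemCallEnter() and vSystemCallExit() later in this patch). Wrappers whose fifth parameter is passed on the stack, such as MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits, use portSVC_SYSTEM_CALL_ENTER_1 instead, and MPU_xTimerGenericCommand additionally checks IPSR so that callers already in an interrupt take the privileged path. The sketch below is an illustrative C rendering of that decision, not part of the patch; the real wrappers must remain naked assembly so the caller's r0-r3 arguments reach the Impl function undisturbed, and the helper name prvCallerIsUnprivileged() is a hypothetical stand-in for the inline mrs/tst sequence.

#include <stdint.h>

/* Illustration only - logical equivalent of the privilege check performed
 * by each naked wrapper above. */
static inline uint32_t prvCallerIsUnprivileged( void )
{
    uint32_t ulControl;

    /* CONTROL.nPRIV (bit 0) is 1 when thread mode is running unprivileged. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) :: "memory" );

    return ( ulControl & 1UL );
}

/*
 * Pseudo-flow of every MPU_<api>() wrapper:
 *
 *   if( prvCallerIsUnprivileged() == 0UL )
 *   {
 *       b MPU_<api>Impl;                  -- already privileged: tail-branch
 *   }
 *   else
 *   {
 *       svc portSVC_SYSTEM_CALL_ENTER;    -- vSystemCallEnter(): switch to the
 *                                            task's system call stack and
 *                                            raise privilege
 *       bl  MPU_<api>Impl;                -- run the implementation privileged
 *       svc portSVC_SYSTEM_CALL_EXIT;     -- vSystemCallExit(): restore the
 *                                            task stack and drop privilege
 *       bx  lr;
 *   }
 */
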
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM33/non_secure/port.c b/portable/GCC/ARM_CM33/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM33/non_secure/port.c +++ b/portable/GCC/ARM_CM33/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the effect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed on the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the values of the LR and PSPLIM registers before the SVC was raised. + * We need to restore them when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM33/non_secure/portasm.c b/portable/GCC/ARM_CM33/non_secure/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/GCC/ARM_CM33/non_secure/portasm.c +++ b/portable/GCC/ARM_CM33/non_secure/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. 
*/ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. 
*/ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. 
*/ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. */ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. 
*/ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. 
r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. 
*/ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. 
*/ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. 
&( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 19da9b0ecfe..b9efb07ddc7 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M33" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
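Every wrapper in this file follows the same dispatch pattern: it saves r0, reads the CONTROL register to see whether the caller is already privileged (nPRIV bit clear) and, if so, branches straight to the corresponding *Impl function; otherwise it raises SVC portSVC_SYSTEM_CALL_ENTER so the SVC handler can move execution onto the task's dedicated system call stack and raise privilege, calls the Impl function, and then raises SVC portSVC_SYSTEM_CALL_EXIT to drop privilege and return to the task stack. A rough C equivalent of that flow is sketched below purely for illustration; MPU_Example and ExampleImpl are made-up names, and the real wrappers have to be naked assembly so that the arguments in r0-r3 reach the Impl function untouched.

    /* Illustrative sketch only - not part of the patch. The real wrappers
     * are naked assembly; MPU_Example and ExampleImpl are hypothetical. */
    extern uint32_t ExampleImpl( uint32_t ulParam );

    uint32_t MPU_Example( uint32_t ulParam )
    {
        uint32_t ulControl;

        /* CONTROL bit 0 (nPRIV) is set when running unprivileged. */
        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        if( ( ulControl & 1UL ) == 0UL )
        {
            /* Already privileged - call the implementation directly. */
            return ExampleImpl( ulParam );
        }
        else
        {
            uint32_t ulReturn;

            /* Unprivileged - the ENTER SVC switches to the system call
             * stack and raises privilege, the EXIT SVC undoes both. */
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
            ulReturn = ExampleImpl( ulParam );
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );

            return ulReturn;
        }
    }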
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
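Most wrappers, like the queue functions here, take at most four arguments, so everything the Impl function needs is already in r0-r3 and portSVC_SYSTEM_CALL_ENTER is used. The wrappers for calls with a fifth parameter - MPU_xTaskGenericNotify and MPU_xTaskGenericNotifyWait above, and MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits below - use portSVC_SYSTEM_CALL_ENTER_1 instead, because under the AAPCS the fifth argument is passed on the caller's stack and the handler for that SVC (vSystemCallEnter_1, added to port.c later in this patch) has to copy it across when it switches to the system call stack. A hypothetical prototype makes the argument layout explicit; the name Example5 is made up:

    /* Hypothetical illustration of the AAPCS argument layout that makes the
     * ENTER_1 variant necessary - not part of the patch. */
    BaseType_t Example5( uint32_t a,    /* r0 */
                         uint32_t b,    /* r1 */
                         uint32_t c,    /* r2 */
                         uint32_t d,    /* r3 */
                         uint32_t e );  /* passed at [sp] at the call site */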
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
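Among the wrappers above, MPU_xTimerGenericCommand is the one exception to the plain CONTROL-only check: it first reads IPSR and takes the privileged path whenever IPSR is non-zero, presumably because the timer command can also be issued from interrupt context, where raising an SVC is not an option. A rough C equivalent of that extra check, for illustration only, is:

    /* Rough C equivalent of the check at the top of MPU_xTimerGenericCommand
     * above - illustrative only, not part of the patch. */
    uint32_t ulIPSR, ulControl;

    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) );
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    if( ( ulIPSR != 0UL ) || ( ( ulControl & 1UL ) == 0UL ) )
    {
        /* Handler mode, or already-privileged thread mode: branch to
         * MPU_xTimerGenericCommandImpl directly. */
    }
    else
    {
        /* Unprivileged thread mode: bracket the call with the
         * ENTER_1 / EXIT SVC pair as in the other wrappers. */
    }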
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
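+         * Setting CONTROL.nPRIV returns thread mode to unprivileged before the
+         * exception returns, so the task resumes on its own stack at the
+         * privilege level it had before raising the system call.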
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
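+             * For example (illustrative values, assuming portBYTE_ALIGNMENT is 8
+             * and portBYTE_ALIGNMENT_MASK is 0x0007): a top address of 0x2000A00C
+             * is rounded down to 0x2000A008 by the mask below, and a limit address
+             * of 0x20009F02 is rounded up to 0x20009F08, so both ends of the
+             * system call stack are double word aligned.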
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. 
*/ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. 
r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. 
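+            * r2 holds the EXC_RETURN value that pxPortInitialiseStack placed on
+            * the task stack; branching to it triggers the exception return
+            * sequence, which unstacks the hardware saved frame and starts the
+            * task in thread mode at its entry point.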
*/ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. 
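+         * Writing RNR selects the region to program; the stmia below then writes
+         * eight words starting at RBAR, i.e. the RBAR/RLAR pair for region 4 plus
+         * the three alias pairs, so regions 4 to 7 are programmed in one store.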
*/ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. 
*/ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. 
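+            * The register list matches the "stmdb r0!, {r2-r11}" used on the save
+            * path above, so r2 receives the stacked PSPLIM value and r3 the stacked
+            * EXC_RETURN.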
*/ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. */ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 19da9b0ecfe..b9efb07ddc7 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M33" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. 
*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
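+     * The diagrams below give the number of 32-bit words saved per task in
+     * ulContext for each configuration; for example, with FPU/MVE and TrustZone
+     * enabled the worst case is 16 + 16 + 8 + 8 + 5 + 1 = 54 words, which is the
+     * MAX_CONTEXT_SIZE used for that configuration.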
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
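+                                        * The first four parameters travel in r0-r3 as part of the
+                                        * hardware stacked frame; a fifth parameter is passed on the
+                                        * task stack and must be copied across to the system call
+                                        * stack, which is what vSystemCallEnter_1 does.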
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
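Note (illustration only, not part of the patch): every naked MPU_* wrapper above and below follows the same privilege-check trampoline - read CONTROL, and if bit 0 (nPRIV) is clear the caller is already privileged, so the wrapper branches straight to the matching *Impl function; otherwise it enters the system call with one SVC, calls the *Impl function, and exits with a second SVC. The sketch below renders that control flow in C purely for clarity; the function names and SVC numbers are placeholders, and the real wrappers stay in naked assembly, presumably so the arguments already held in r0-r3 reach the implementation untouched.

    #include <stdint.h>

    /* Placeholder SVC numbers - the real values come from the port headers. */
    #define portSVC_SYSTEM_CALL_ENTER    100
    #define portSVC_SYSTEM_CALL_EXIT     101

    extern uint32_t prvExampleImpl( void ); /* Hypothetical *Impl function. */

    uint32_t prvExampleWrapper( void ) /* Hypothetical wrapper, for illustration only. */
    {
        uint32_t ulControl;

        /* CONTROL bit 0 (nPRIV) is 0 when the caller is privileged. */
        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        if( ( ulControl & 1UL ) == 0UL )
        {
            /* Privileged - call the implementation directly. */
            return prvExampleImpl();
        }
        else
        {
            uint32_t ulReturn;

            /* Unprivileged - enter the system call via SVC, run the
             * implementation, then exit via a second SVC. */
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
            ulReturn = prvExampleImpl();
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );

            return ulReturn;
        }
    }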
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
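Note (illustration only, not part of the patch): the wrappers above whose underlying call takes five parameters - MPU_xTaskGenericNotify, MPU_xTaskGenericNotifyWait, MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits - enter through portSVC_SYSTEM_CALL_ENTER_1 rather than portSVC_SYSTEM_CALL_ENTER, matching the separate vSystemCallEnter_1() handler this patch declares later for system calls with five parameters. A likely reason is that under the AAPCS only the first four arguments travel in r0-r3 and are captured in the hardware-stacked exception frame; a fifth argument is passed on the task stack, so the entry handler has an extra word to move onto the system call stack. The fragment below is only a conceptual sketch of that copy under those assumptions, not the port's actual implementation.

    #include <stdint.h>
    #include <string.h>

    #define portBASIC_FRAME_WORDS    8U    /* r0-r3, r12, lr, pc, xPSR. */

    /* Conceptual helper: copy the hardware-stacked frame, plus any argument
     * words that were passed on the task stack, onto the system call stack. */
    static void prvCopyFrameToSystemCallStack( uint32_t * pulSystemCallStack,
                                               const uint32_t * pulTaskStack,
                                               uint32_t ulStackedArgWords )
    {
        memcpy( pulSystemCallStack, pulTaskStack,
                ( portBASIC_FRAME_WORDS + ulStackedArgWords ) * sizeof( uint32_t ) );
    }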
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM35P/non_secure/port.c +++ b/portable/GCC/ARM_CM35P/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** + * @brief Setup the Memory Protection Unit (MPU). + */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
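The enter path above sizes the hardware-stacked exception frame from bit[4] of EXC_RETURN (clear means an extended, FPU frame), copies that frame onto the task's dedicated system call stack, and then re-points PSP and PSPLIM at it. A condensed C sketch of just the sizing and copy, with an illustrative helper name and parameters (the lazy-stacking trigger and register writes are omitted):

    /* Condensed sketch of the frame handling in vSystemCallEnter(); the
     * function name and parameters are illustrative. */
    static uint32_t * prvCopyFrameToSystemCallStack( const uint32_t * pulTaskStack,
                                                     uint32_t * pulSystemCallStack,
                                                     uint32_t ulLR )
    {
        uint32_t ulFrameWords, i;

        /* Bit[4] of EXC_RETURN is clear when the hardware stacked an extended
         * (FPU) frame: r0-r3, r12, LR, PC, xPSR, s0-s15, FPSCR and a reserved
         * word - 26 words in total. Otherwise it is the 8 word basic frame. */
        if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
        {
            ulFrameWords = 26UL;
        }
        else
        {
            ulFrameWords = 8UL;
        }

        /* Reserve space on the task's dedicated system call stack and copy
         * the frame across, so the SVC can return onto that stack once PSP
         * and PSPLIM have been switched to it. */
        pulSystemCallStack -= ulFrameWords;

        for( i = 0; i < ulFrameWords; i++ )
        {
            pulSystemCallStack[ i ] = pulTaskStack[ i ];
        }

        return pulSystemCallStack;
    }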
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
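vSystemCallEnter_1() additionally copies the fifth argument, which the caller pushed on its own stack just above the exception frame; if the hardware inserted an aligner word (bit[9] of the stacked xPSR set), that argument sits one word higher. A fragment showing only that copy, using the same variable names as the function:

    /* Fragment, mirroring the fifth-parameter copy in vSystemCallEnter_1();
     * the ulTaskFlags bookkeeping is shown in the patch above. */
    if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
    {
        /* Hardware inserted an aligner word, so the parameter is one word
         * further up the task stack than the end of the exception frame. */
        pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1UL ];
    }
    else
    {
        pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
    }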
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
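The two adjustments above round the system call stack's start down, and its limit up, to the next double word boundary so that PSP and PSPLIM always satisfy the AAPCS 8-byte alignment requirement and no bit[9] padding is ever needed on that stack. A worked example with made-up numbers, assuming portBYTE_ALIGNMENT is 8 (mask 0x0007) and a 128 word buffer:

    /* Illustrative numbers only: a 128 word buffer whose top word happens
     * not to sit on an 8 byte boundary. */
    uint32_t ulBufferStart = 0x20001008UL;                      /* &ulSystemCallStackBuffer[ 0 ].            */
    uint32_t ulLastWord    = ulBufferStart + ( 127UL * 4UL );   /* &ulSystemCallStackBuffer[ 127 ] = 0x20001204. */

    uint32_t ulStackTop    = ulLastWord & ~0x7UL;               /* 0x20001200 - rounded down, used as PSP.   */
    uint32_t ulStackLimit  = ( ulBufferStart + 0x7UL ) & ~0x7UL;/* 0x20001008 - rounded up, used as PSPLIM.  */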
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
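The new configASSERT() added to xPortStartScheduler() above rejects a configMAX_SYSCALL_INTERRUPT_PRIORITY whose low, unimplemented priority bits are set; the NVIC silently ignores those bits, so interrupts would be masked at a different level than the configuration suggests. A worked example with illustrative values, assuming a device that implements only the top three priority bits:

    /* Illustrative values, not taken from this port: 3 implemented bits. */
    uint8_t ucMaxPriorityValue = 0xE0U;    /* Value read back from the NVIC. */

    /* 0xA0 passes: no unimplemented bits are set. */
    configASSERT( ( 0xA0U & ( ~ucMaxPriorityValue ) ) == 0U );

    /* 0xA1 would trip the assert: the hardware ignores the low bits and the
     * effective priority is 0xA0, not the configured 0xA1. */
    /* configASSERT( ( 0xA1U & ( ~ucMaxPriorityValue ) ) == 0U ); */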
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM35P/non_secure/portasm.c b/portable/GCC/ARM_CM35P/non_secure/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portasm.c +++ b/portable/GCC/ARM_CM35P/non_secure/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. 
*/ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. 
*/ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. 
*/ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. */ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. 
*/ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. 
r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. 
*/ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. 
*/ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. 
&( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacro.h b/portable/GCC/ARM_CM35P/non_secure/portmacro.h index cc643459770..9545737c550 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M35P" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
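The task-notification wrappers above pass portSVC_SYSTEM_CALL_ENTER_1 as the entry SVC number, whereas most wrappers in this file pass portSVC_SYSTEM_CALL_ENTER. The defines added to portmacrocommon.h earlier in this patch draw the line at parameter count: ENTER covers system calls with up to 4 parameters and ENTER_1 covers calls with 5 parameters, whose fifth argument arrives on the stack under the AAPCS and therefore presumably needs extra handling on the system-call entry path. Below is a minimal sketch of that selection rule; it is not part of the patch, and prvEntrySvcNumber() is a hypothetical helper that only makes explicit the choice each hand-written wrapper encodes in its "svc %0" operand.

#include <stdint.h>
#include <stdio.h>

/* Values from the portmacrocommon.h hunk earlier in this patch. */
#define portSVC_SYSTEM_CALL_ENTER      4    /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1    5    /* System calls with 5 parameters. */

/* Hypothetical helper, not in the patch. */
static uint32_t prvEntrySvcNumber( uint32_t ulParameterCount )
{
    return ( ulParameterCount <= 4U ) ? portSVC_SYSTEM_CALL_ENTER
                                      : portSVC_SYSTEM_CALL_ENTER_1;
}

int main( void )
{
    /* xQueueReceive() takes 3 parameters, xTaskGenericNotifyWait() takes 5. */
    printf( "xQueueReceive          -> svc %u\n", ( unsigned ) prvEntrySvcNumber( 3U ) );
    printf( "xTaskGenericNotifyWait -> svc %u\n", ( unsigned ) prvEntrySvcNumber( 5U ) );
    return 0;
}

Of the wrappers in this file, xTaskGenericNotify, xTaskGenericNotifyWait, xTimerGenericCommand and xEventGroupWaitBits are the five-parameter cases that use ENTER_1; the remainder use ENTER.
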
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
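MPU_xQueueReceive() above is a representative instance of the pattern every wrapper in this file repeats: save r0, read CONTROL, test the nPRIV bit, then either branch straight to the privileged MPU_...Impl implementation or bracket the call between the entry and exit SVCs. Expressed as ordinary C rather than a naked assembly trampoline, the control flow looks roughly like the sketch below; it is not part of the patch, and prvReadControlRegister(), prvRaiseSvc(), prvImpl() and prvWrapperSketch() are hypothetical stand-ins for the CONTROL read, the svc instruction and the Impl function, kept hosted so the sketch compiles and runs on its own.

#include <stdint.h>
#include <stdio.h>

/* SVC numbers from the portmacrocommon.h hunk earlier in this patch. */
#define portSVC_SYSTEM_CALL_ENTER    4
#define portSVC_SYSTEM_CALL_EXIT     6

/* Hypothetical stub standing in for "mrs r0, control". */
static uint32_t prvReadControlRegister( void )
{
    return 1U; /* Pretend the nPRIV bit is set, i.e. the caller is unprivileged. */
}

/* Hypothetical stub standing in for the svc instruction. */
static void prvRaiseSvc( int iNumber )
{
    printf( "svc %d\n", iNumber );
}

/* Stand-in for the privileged MPU_xQueueReceiveImpl() the real wrapper reaches. */
static int prvImpl( void )
{
    return 1;
}

/* The decision each naked wrapper above encodes, written as ordinary C. */
static int prvWrapperSketch( void )
{
    int iResult;

    if( ( prvReadControlRegister() & 1U ) == 0U )
    {
        /* Privileged caller: branch straight to the implementation. */
        iResult = prvImpl();
    }
    else
    {
        /* Unprivileged caller: raise the entry SVC (which, in the real port,
         * switches onto the system call stack), call the implementation, then
         * raise the exit SVC to return to the task stack. */
        prvRaiseSvc( portSVC_SYSTEM_CALL_ENTER );
        iResult = prvImpl();
        prvRaiseSvc( portSVC_SYSTEM_CALL_EXIT );
    }

    return iResult;
}

int main( void )
{
    printf( "result = %d\n", prvWrapperSketch() );
    return 0;
}

The real wrappers stay in naked assembly because the caller's arguments in r0-r3 and the link register must reach the Impl function untouched, which is why the fragments above push and pop r0 around the CONTROL read instead of letting the compiler allocate registers.
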
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
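Every MPU_* stub in the hunks above follows the same shape: read CONTROL, test the nPRIV bit, let privileged callers branch straight to the corresponding ...Impl function, and route unprivileged callers through the portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT SVC pair (MPU_xTimerGenericCommand additionally reads IPSR first so that calls made from an ISR always take the privileged path, and uses portSVC_SYSTEM_CALL_ENTER_1 because it has a fifth, stack-passed parameter). The sketch below is only a host-runnable C model of that dispatch decision, not port code: the real stubs are naked assembly, and the variable standing in for CONTROL as well as the two callback names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define portCONTROL_NPRIV_BIT    ( 1UL << 0 )    /* CONTROL bit 0: 0 = privileged, 1 = unprivileged. */

typedef void ( * PathFn_t )( void );

static void prvDirectImplCall( void ) { puts( "privileged:   b MPU_...Impl" ); }
static void prvSystemCallPath( void ) { puts( "unprivileged: svc ENTER; bl MPU_...Impl; svc EXIT" ); }

/* Models the 'mrs r0, control / tst r0, #1 / bne ..._Unpriv' sequence used by each stub. */
static void prvDispatch( uint32_t ulControl,
                         PathFn_t pxPrivPath,
                         PathFn_t pxUnprivPath )
{
    if( ( ulControl & portCONTROL_NPRIV_BIT ) == 0UL )
    {
        pxPrivPath();
    }
    else
    {
        pxUnprivPath();
    }
}

int main( void )
{
    prvDispatch( 0UL, prvDirectImplCall, prvSystemCallPath );                    /* Privileged task. */
    prvDispatch( portCONTROL_NPRIV_BIT, prvDirectImplCall, prvSystemCallPath );  /* Unprivileged task. */
    return 0;
}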
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
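The region helper macros added above are exactly what xPortIsAuthorizedToAccessBuffer (added further down in this file) composes for each enabled MPU region: extract the region's first and last addresses from RBAR/RLAR, check that both ends of the buffer fall inside the region, and check the requested access against the permissions decoded from the RBAR AP bits. A small host-side sanity check of that composition is sketched below; the mask values are copied from this hunk, while the tskMPU_*_PERMISSION values and the example RBAR/RLAR contents are assumptions made purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define portMPU_RBAR_ADDRESS_MASK    ( 0xffffffe0 )
#define portMPU_RLAR_ADDRESS_MASK    ( 0xffffffe0 )
#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar )      ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar )       ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end )  ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
#define portIS_AUTHORIZED( accessRequest, permissions )  ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )

#define tskMPU_READ_PERMISSION     ( 1UL << 0 )   /* Assumed values, for illustration only. */
#define tskMPU_WRITE_PERMISSION    ( 1UL << 1 )

int main( void )
{
    uint32_t ulRBAR = 0x20000000UL;             /* Example region base, AP bits clear. */
    uint32_t ulRLAR = 0x20000FE0UL | 0x1UL;     /* Example region limit, enable bit set. */
    uint32_t ulStart = portEXTRACT_FIRST_ADDRESS_FROM_RBAR( ulRBAR );
    uint32_t ulEnd = portEXTRACT_LAST_ADDRESS_FROM_RLAR( ulRLAR );
    uint32_t ulBuffer = 0x20000100UL;

    printf( "region [0x%08x, 0x%08x], buffer in range: %d\n",
            ( unsigned ) ulStart, ( unsigned ) ulEnd,
            portIS_ADDRESS_WITHIN_RANGE( ulBuffer, ulStart, ulEnd ) );
    printf( "read allowed with read+write permissions: %d\n",
            portIS_AUTHORIZED( tskMPU_READ_PERMISSION,
                               tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ) );
    return 0;
}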
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
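The system call stack pointers set up in pxPortInitialiseStack above use the usual mask arithmetic to guarantee double word alignment: the top of the buffer is aligned down with & ~mask, and the limit is aligned up with + ( alignment - 1 ) then & ~mask. Below is a host-side check of that arithmetic, assuming the 8-byte alignment used by the Cortex-M ports (portBYTE_ALIGNMENT 8, mask 0x0007); the example addresses are made up.

#include <stdint.h>
#include <stdio.h>

#define portBYTE_ALIGNMENT         8
#define portBYTE_ALIGNMENT_MASK    ( 0x0007 )

int main( void )
{
    uint32_t ulBufferStart = 0x20001002UL;    /* Deliberately misaligned example addresses. */
    uint32_t ulBufferTop = 0x200010FAUL;

    /* Align the top of the system call stack DOWN to an 8-byte boundary. */
    uint32_t ulStackTop = ulBufferTop & ( uint32_t ) ~( portBYTE_ALIGNMENT_MASK );

    /* Align the stack limit UP to an 8-byte boundary. */
    uint32_t ulStackLimit = ( ulBufferStart + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
                            ( uint32_t ) ~( portBYTE_ALIGNMENT_MASK );

    printf( "top   0x%08x -> 0x%08x\n", ( unsigned ) ulBufferTop, ( unsigned ) ulStackTop );      /* 0x200010F8 */
    printf( "limit 0x%08x -> 0x%08x\n", ( unsigned ) ulBufferStart, ( unsigned ) ulStackLimit );  /* 0x20001008 */
    return 0;
}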
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. 
*/ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. 
r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. 
*/ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. 
*/ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. 
*/ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. 
*/ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. */ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h index cc643459770..9545737c550 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M35P" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. 
*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..df9239a41d6 --- /dev/null +++ b/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0} \n" + " b 
MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push 
{r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc 
%1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE 
MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
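Two of the wrappers above deviate slightly from the common pattern. MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits take five parameters, so the fifth is passed on the caller's stack and those wrappers raise svc portSVC_SYSTEM_CALL_ENTER_1 instead of portSVC_SYSTEM_CALL_ENTER, allowing vSystemCallEnter_1 (added to port.c later in this patch) to copy the stacked argument across to the system call stack; MPU_xTimerGenericCommand additionally reads IPSR first so that calls made from interrupt context skip the SVC path and branch straight to the Impl function. The fragment below is a sketch rather than patch code of how that fifth argument is located: the two constants mirror the values the port.c hunk of this patch defines, and prvLocateStackedFifthArgument() is a hypothetical name.

    #include <stdint.h>

    /* Values mirroring the constants added to port.c by this patch. */
    #define portOFFSET_TO_PSR             ( 7 )
    #define portPSR_STACK_PADDING_MASK    ( 1UL << 9UL )

    /* Sketch only: the Cortex-M hardware pushes an 8-word exception frame
     * (r0-r3, r12, lr, pc, xPSR) when the SVC is taken, so a stacked argument
     * sits immediately above that frame - one word higher again if the
     * hardware inserted an aligner word (stacked xPSR bit 9 set). */
    static uint32_t prvLocateStackedFifthArgument( const uint32_t * pulTaskStack )
    {
        const uint32_t ulStackFrameSize = 8UL;

        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
        {
            /* The stack pointer was force-aligned on exception entry - skip
             * the inserted padding word. */
            return pulTaskStack[ ulStackFrameSize + 1UL ];
        }
        else
        {
            return pulTaskStack[ ulStackFrameSize ];
        }
    }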
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c old mode 100755 new mode 100644 index 619f2b0c8be..844b2ce1f53 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -90,6 +90,7 @@ /* Constants required to set up the initial stack. */ #define portINITIAL_XPSR ( 0x01000000 ) +#define portINITIAL_EXC_RETURN ( 0xfffffffdUL ) #define portINITIAL_CONTROL_IF_UNPRIVILEGED ( 0x03 ) #define portINITIAL_CONTROL_IF_PRIVILEGED ( 0x02 ) @@ -103,12 +104,31 @@ #define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) #define portPRIGROUP_SHIFT ( 8UL ) +/* Constants used during system call enter and exit. */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) + /* Offsets in the stack to the parameters when inside the SVC handler. */ +#define portOFFSET_TO_LR ( 5 ) #define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) + +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /* @@ -146,7 +166,7 @@ static void prvRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIV * C portion of the SVC handler. The SVC handler is split between an asm entry * and a C wrapper for simplicity of coding and maintenance. 
*/ -static void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION; +void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION; /** * @brief Checks whether or not the processor is privileged. @@ -182,6 +202,53 @@ void vResetPrivilege( void ) __attribute__( ( naked ) ); #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; #endif + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + */ + void vSystemCallEnter( uint32_t * pulTaskStack ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ /* Each task maintains its own interrupt status in the critical nesting @@ -207,34 +274,91 @@ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ - pxTopOfStack--; - *pxTopOfStack = 0; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ - if( xRunPrivileged == pdTRUE ) { - *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED; + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED; } else { - *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED; + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED; } + xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */ + xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */ + xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */ + xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */ + xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. 
*/ + xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */ + xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */ + xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */ + xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ + + xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */ + xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */ + xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */ + xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */ + xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */ + xMPUSettings->ulContext[ 16 ] = 0; /* LR. */ + xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */ + xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */ + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); - return pxTopOfStack; + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + return &( xMPUSettings->ulContext[ 19 ] ); } /*-----------------------------------------------------------*/ -void vPortSVCHandler( void ) +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq vSystemCallEnter \n" + "cmp r2, %1 \n" + "beq vSystemCallEnter_1 \n" + "cmp r2, %2 \n" + "beq vSystemCallExit \n" + "b vSVCHandler_C \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */ { /* Assumes psp was in use. */ __asm volatile @@ -248,12 +372,14 @@ void vPortSVCHandler( void ) " mrs r0, psp \n" #endif " b %0 \n" - ::"i" ( prvSVCHandler ) : "r0", "memory" + ::"i" ( vSVCHandler_C ) : "r0", "memory" ); } + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ -static void prvSVCHandler( uint32_t * pulParam ) +void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */ { uint8_t ucSVCNumber; uint32_t ulPC; @@ -262,7 +388,7 @@ static void prvSVCHandler( uint32_t * pulParam ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being - * exported from linker scripts. */ + * exported from linker scripts. 
*/ extern uint32_t * __syscalls_flash_start__; extern uint32_t * __syscalls_flash_end__; #else @@ -296,7 +422,6 @@ static void prvSVCHandler( uint32_t * pulParam ) break; - #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the * svc was raised from any of the @@ -325,7 +450,7 @@ static void prvSVCHandler( uint32_t * pulParam ) ::: "r1", "memory" ); break; - #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ + #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ default: /* Unknown SVC call. */ break; @@ -333,45 +458,311 @@ static void prvSVCHandler( uint32_t * pulParam ) } /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulSystemCallLocation, i; + const uint32_t ulStackFrameSize = 8; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. 
*/ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulSystemCallLocation, i; + const uint32_t ulStackFrameSize = 8; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. 
*/ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallExit( uint32_t * pulSystemCallStack ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulSystemCallLocation, i; + const uint32_t ulStackFrameSize = 8; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " orr r1, #1 \n" /* Set nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Restore the stacked link register to what it was at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. 
*/ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} +/*-----------------------------------------------------------*/ + static void prvRestoreContextOfFirstTask( void ) { __asm volatile ( - " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */ - " ldr r0, [r0] \n" - " ldr r0, [r0] \n" - " msr msp, r0 \n"/* Set the msp back to the start of the stack. */ - " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */ - " ldr r1, [r3] \n" - " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */ - " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */ - " \n" - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers. */ - " stmia r2!, {r4-r11} \n"/* Write 4 sets of MPU registers. */ - " \n" - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - " \n" - " ldmia r0!, {r3, r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry. */ - " msr control, r3 \n" - " msr psp, r0 \n"/* Restore the task stack pointer. */ - " mov r0, #0 \n" - " msr basepri, r0 \n" - " ldr r14, =0xfffffffd \n"/* Load exec return code. */ - " bx r14 \n" - " \n" - " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ - " .align 4 \n" + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " \n" + /*------------ Program MPU. ------------ */ + " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + " str r3, [r0] \n" /* Disable MPU. */ + " \n" + " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */ + " \n" + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + " str r3, [r0] \n" /* Enable MPU. 
*/ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + /*---------- Restore Context. ---------- */ + " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + " msr psp, r0 \n" + " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */ + " msr control, r3 \n" + " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " bx lr \n" + " \n" + " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ + " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" ); } @@ -415,6 +806,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -451,28 +846,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -603,53 +976,66 @@ void xPortPendSVHandler( void ) __asm volatile ( - " mrs r0, psp \n" + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location where the context should be saved. */ " \n" - " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */ - " ldr r2, [r3] \n" + /*------------ Save Context. ----------- */ + " mrs r3, control \n" + " mrs r0, psp \n" + " isb \n" " \n" - " mrs r1, control \n" - " stmdb r0!, {r1, r4-r11} \n"/* Save the remaining registers. */ - " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */ + " stmia r1!, {r3-r11, lr} \n" /* Store CONTROL register, r4-r11 and LR. */ + " ldmia r0, {r4-r11} \n" /* Copy hardware saved context into r4-r11. */ + " stmia r1!, {r0, r4-r11} \n" /* Store original PSP (after hardware has saved context) and the hardware saved context. */ + " str r1, [r2] \n" /* Save the location from where the context should be restored as the first member of TCB. 
*/ " \n" - " stmdb sp!, {r3, r14} \n" - " mov r0, %0 \n" - " msr basepri, r0 \n" - " dsb \n" - " isb \n" - " bl vTaskSwitchContext \n" - " mov r0, #0 \n" - " msr basepri, r0 \n" - " ldmia sp!, {r3, r14} \n" - " \n"/* Restore the context. */ - " ldr r1, [r3] \n" - " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */ - " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */ + /*---------- Select next task. --------- */ + " mov r0, %0 \n" + " msr basepri, r0 \n" + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" " \n" - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ + /*------------ Program MPU. ------------ */ + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */ " \n" - " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers. */ - " stmia r2!, {r4-r11} \n"/* Write 4 sets of MPU registers. */ + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + " str r3, [r0] \n" /* Disable MPU. */ " \n" - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ + " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */ " \n" - " ldmia r0!, {r3, r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry. */ - " msr control, r3 \n" + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + " str r3, [r0] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ " \n" - " msr psp, r0 \n" - " bx r14 \n" + /*---------- Restore Context. ---------- */ + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */ " \n" - " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ - " .align 4 \n" + " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + " msr psp, r0 \n" + " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */ + " msr control, r3 \n" + " \n" + " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. 
*/ + " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); @@ -834,11 +1220,19 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + /* Invalidate user configurable regions. */ for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } } else @@ -861,6 +1255,11 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( portMPU_REGION_CACHEABLE_BUFFERABLE ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) + + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); } lIndex = 0; @@ -881,12 +1280,28 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | ( xRegions[ lIndex ].ulParameters ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL ); + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; + if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) || + ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION; + } + if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } } else { /* Invalidate the region. 
*/ xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } lIndex++; @@ -895,6 +1310,47 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } /*-----------------------------------------------------------*/ +BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + +{ + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + + return xAccessGranted; +} +/*-----------------------------------------------------------*/ + #if ( configASSERT_DEFINED == 1 ) void vPortValidateInterruptPriority( void ) @@ -954,4 +1410,4 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } #endif /* configASSERT_DEFINED */ -/*-----------------------------------------------------------*/ \ No newline at end of file +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index b15193f1ee5..d1f659e3789 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -104,10 +104,45 @@ uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; -/* Plus 1 to create space for the stack region. */ + typedef struct MPU_REGION_SETTINGS + { + uint32_t ulRegionStartAddress; + uint32_t ulRegionEndAddress; + uint32_t ulRegionPermissions; + } xMPU_REGION_SETTINGS; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + #define MAX_CONTEXT_SIZE 20 + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. 
*/ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + typedef struct MPU_SETTINGS { xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; + xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ]; + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif } xMPU_SETTINGS; /* Architecture specifics. */ @@ -115,13 +150,15 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ - #define portSVC_START_SCHEDULER 0 - #define portSVC_YIELD 1 - #define portSVC_RAISE_PRIVILEGE 2 + #define portSVC_START_SCHEDULER 0 + #define portSVC_YIELD 1 + #define portSVC_RAISE_PRIVILEGE 2 + #define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ + #define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ + #define portSVC_SYSTEM_CALL_EXIT 5 /* Scheduler utilities. */ @@ -233,6 +270,16 @@ #define portRESET_PRIVILEGE() vResetPrivilege() /*-----------------------------------------------------------*/ + extern BaseType_t xPortIsTaskPrivileged( void ); + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() +/*-----------------------------------------------------------*/ + portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c old mode 100755 new mode 100644 index 88fc76db894..d18854e1beb --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -335,6 +335,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -371,28 +375,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM4F/portmacro.h b/portable/GCC/ARM_CM4F/portmacro.h index 443661a6d3a..532eefa7405 100644 --- a/portable/GCC/ARM_CM4F/portmacro.h +++ b/portable/GCC/ARM_CM4F/portmacro.h @@ -82,7 +82,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..df9239a41d6 --- /dev/null +++ b/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0} \n" + " b 
MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push 
{r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc 
%1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE 
MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
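+/* Every wrapper in this file follows the same shape: test CONTROL bit 0 (nPRIV)
+ * to see whether the caller is already privileged, branch straight to the
+ * MPU_...Impl function when it is, and otherwise bracket the call between
+ * portSVC_SYSTEM_CALL_ENTER and portSVC_SYSTEM_CALL_EXIT so that the call runs
+ * privileged on the task's system call stack. Wrappers that take five
+ * parameters, such as xTaskGenericNotify and xTaskGenericNotifyWait above, use
+ * portSVC_SYSTEM_CALL_ENTER_1 instead. The block below is a minimal sketch of
+ * the pattern for a hypothetical single-parameter call - MPU_xExampleCall and
+ * MPU_xExampleCallImpl are illustrative names only - and is compiled out. */
+#if 0
+BaseType_t MPU_xExampleCall( void * pvParam ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xExampleCall( void * pvParam ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                 \n"
+        " .extern MPU_xExampleCallImpl    \n"
+        " \n"
+        " push {r0}                       \n" /* Preserve r0 while CONTROL is inspected. */
+        " mrs r0, control                 \n" /* CONTROL bit 0 (nPRIV) is 1 when running unprivileged. */
+        " tst r0, #1                      \n"
+        " bne MPU_xExampleCall_Unpriv     \n"
+        " MPU_xExampleCall_Priv:          \n"
+        " pop {r0}                        \n"
+        " b MPU_xExampleCallImpl          \n" /* Already privileged - call the implementation directly. */
+        " MPU_xExampleCall_Unpriv:        \n"
+        " pop {r0}                        \n"
+        " svc %0                          \n" /* Enter the system call. */
+        " bl MPU_xExampleCallImpl         \n"
+        " svc %1                          \n" /* Exit the system call. */
+        " bx lr                           \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+#endif /* 0 */
+/*-----------------------------------------------------------*/
+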
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
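+/*-----------------------------------------------------------*/
+
+/* These wrappers are only built when configUSE_MPU_WRAPPERS_V1 is 0. As the
+ * #error in portmacro.h earlier in this patch notes, that configuration also
+ * requires configSYSTEM_CALL_STACK_SIZE, the depth in words of the per-task
+ * system call stack described by xSYSTEM_CALL_STACK_INFO. A minimal
+ * FreeRTOSConfig.h fragment - the value 128 is only an example - would
+ * therefore contain:
+ *
+ *     #define configUSE_MPU_WRAPPERS_V1       0
+ *     #define configSYSTEM_CALL_STACK_SIZE    128
+ */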
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
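Every MPU_* wrapper in this file follows the same shape: test the nPRIV bit of the CONTROL register, branch straight to the matching *Impl function when the caller is already privileged, and otherwise bracket the *Impl call with the SYSTEM_CALL_ENTER and SYSTEM_CALL_EXIT SVCs so the call runs privileged on the per-task system call stack. A minimal C sketch of that control flow follows; it is not part of the patch, and IS_PRIVILEGED() and vRaiseSVC() are hypothetical stand-ins for the "mrs r0, control / tst r0, #1" test and the svc instructions.

/* Sketch only: the three externs below are assumed helpers/implementations,
 * not additions made by this patch. */
extern BaseType_t IS_PRIVILEGED( void );
extern void vRaiseSVC( uint32_t ulSvcNumber );
extern EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup,
                                               const EventBits_t uxBitsToSet );

EventBits_t MPU_xEventGroupSetBits_Sketch( EventGroupHandle_t xEventGroup,
                                           const EventBits_t uxBitsToSet )
{
    EventBits_t uxReturn;

    if( IS_PRIVILEGED() == pdTRUE )
    {
        /* Privileged caller: call the implementation directly, as the
         * "b MPU_xEventGroupSetBitsImpl" path does in the assembly above. */
        uxReturn = MPU_xEventGroupSetBitsImpl( xEventGroup, uxBitsToSet );
    }
    else
    {
        /* Unprivileged caller: the ENTER SVC switches to the task's system
         * call stack and raises privilege, the implementation runs, and the
         * EXIT SVC restores the task stack and drops privilege again. */
        vRaiseSVC( portSVC_SYSTEM_CALL_ENTER );
        uxReturn = MPU_xEventGroupSetBitsImpl( xEventGroup, uxBitsToSet );
        vRaiseSVC( portSVC_SYSTEM_CALL_EXIT );
    }

    return uxReturn;
}

Keeping the privilege escalation inside the ENTER/EXIT pair is the intent of these v2 wrappers: the task is privileged only for the duration of the kernel call, never while running its own code.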
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c old mode 100755 new mode 100644 index ab76ee84204..548187db351 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -118,13 +118,35 @@ #define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) #define portPRIGROUP_SHIFT ( 8UL ) +/* Constants used during system call enter and exit. */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) + /* Offsets in the stack to the parameters when inside the SVC handler. */ +#define portOFFSET_TO_LR ( 5 ) #define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) + /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) +/*-----------------------------------------------------------*/ + /* * Configure a number of standard MPU regions that are used by all tasks. */ @@ -160,7 +182,7 @@ static void prvRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIV * C portion of the SVC handler. The SVC handler is split between an asm entry * and a C wrapper for simplicity of coding and maintenance. 
*/ -static void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION; +void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION; /* * Function to enable the VFP. @@ -201,6 +223,56 @@ void vResetPrivilege( void ) __attribute__( ( naked ) ); #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; #endif + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ /* Each task maintains its own interrupt status in the critical nesting @@ -227,39 +299,102 @@ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ - pxTopOfStack--; - *pxTopOfStack = 0; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - - /* A save method is being used that requires each task to maintain its - * own exec return value. */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; - - pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. 
*/ - if( xRunPrivileged == pdTRUE ) { - *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED; + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED; } else { - *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED; + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED; } + xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */ + xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */ + xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */ + xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */ + xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */ + xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */ + xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */ + xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */ + xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ + + xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */ + xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */ + xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */ + xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */ + xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */ + xMPUSettings->ulContext[ 16 ] = 0; /* LR. */ + xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */ + xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */ + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ - return pxTopOfStack; + return &( xMPUSettings->ulContext[ 19 ] ); } /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void vPortSVCHandler( void ) { /* Assumes psp was in use. 
*/ @@ -274,12 +409,14 @@ void vPortSVCHandler( void ) " mrs r0, psp \n" #endif " b %0 \n" - ::"i" ( prvSVCHandler ) : "r0", "memory" + ::"i" ( vSVCHandler_C ) : "r0", "memory" ); } + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ -static void prvSVCHandler( uint32_t * pulParam ) +void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */ { uint8_t ucSVCNumber; uint32_t ulPC; @@ -288,7 +425,7 @@ static void prvSVCHandler( uint32_t * pulParam ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being - * exported from linker scripts. */ + * exported from linker scripts. */ extern uint32_t * __syscalls_flash_start__; extern uint32_t * __syscalls_flash_end__; #else @@ -350,7 +487,7 @@ static void prvSVCHandler( uint32_t * pulParam ) ::: "r1", "memory" ); break; - #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ + #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ default: /* Unknown SVC call. */ break; @@ -358,52 +495,364 @@ static void prvSVCHandler( uint32_t * pulParam ) } /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. 
*/ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variables are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variables are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the effect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed on the stack. 
*/ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variables are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variables are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the effect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " orr r1, #1 \n" /* Set nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Restore the stacked link register to what it was at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} +/*-----------------------------------------------------------*/ + static void prvRestoreContextOfFirstTask( void ) { __asm volatile ( - " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */ - " ldr r0, [r0] \n" - " ldr r0, [r0] \n" - " msr msp, r0 \n"/* Set the msp back to the start of the stack. */ - " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */ - " ldr r1, [r3] \n" - " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */ - " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */ - " \n" - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* configTOTAL_MPU_REGIONS == 16. */ - " \n" - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. 
*/ - " \n" - " ldmia r0!, {r3-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry. */ - " msr control, r3 \n" - " msr psp, r0 \n"/* Restore the task stack pointer. */ - " mov r0, #0 \n" - " msr basepri, r0 \n" - " bx r14 \n" - " \n" - " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ - " .align 4 \n" - "pxCurrentTCBConst2: .word pxCurrentTCB \n" + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " \n" + /*------------ Program MPU. ------------ */ + " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + " str r3, [r0] \n" /* Disable MPU. */ + " \n" + " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 8]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 4 - 8]. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 9 - 12]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 9 - 12]. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ + " \n" + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + " str r3, [r0] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + /*---------- Restore Context. ---------- */ + " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + " msr psp, r0 \n" + " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */ + " msr control, r3 \n" + " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " bx lr \n" + " \n" + " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB\n" ); } /*-----------------------------------------------------------*/ @@ -458,6 +907,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -494,28 +947,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried - * from hardware is at least as many as specified in the - * CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried - * from hardware is at least as many as specified in the - * FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -657,76 +1088,94 @@ void xPortPendSVHandler( void ) __asm volatile ( - " mrs r0, psp \n" - " isb \n" + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location where the context should be saved. */ " \n" - " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */ - " ldr r2, [r3] \n" + /*------------ Save Context. ----------- */ + " mrs r3, control \n" + " mrs r0, psp \n" + " isb \n" " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */ - " it eq \n" - " vstmdbeq r0!, {s16-s31} \n" + " add r0, r0, #0x20 \n" /* Move r0 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r0, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r0, r0, #0x20 \n" /* Set r0 back to the location of hardware saved context. */ " \n" - " mrs r1, control \n" - " stmdb r0!, {r1, r4-r11, r14} \n"/* Save the remaining registers. */ - " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */ + " stmia r1!, {r3-r11, lr} \n" /* Store CONTROL register, r4-r11 and LR. */ + " ldmia r0, {r4-r11} \n" /* Copy hardware saved context into r4-r11. */ + " stmia r1!, {r0, r4-r11} \n" /* Store original PSP (after hardware has saved context) and the hardware saved context. */ + " str r1, [r2] \n" /* Save the location from where the context should be restored as the first member of TCB. */ " \n" - " stmdb sp!, {r0, r3} \n" - " mov r0, %0 \n" - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif - " msr basepri, r0 \n" - " dsb \n" - " isb \n" - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif - " bl vTaskSwitchContext \n" - " mov r0, #0 \n" - " msr basepri, r0 \n" - " ldmia sp!, {r0, r3} \n" - " \n"/* Restore the context. */ - " ldr r1, [r3] \n" - " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */ - " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */ + /*---------- Select next task. --------- */ + " mov r0, %0 \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsid i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. 
*/ + #endif + " msr basepri, r0 \n" + " dsb \n" + " isb \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsie i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" " \n" - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ + /*------------ Program MPU. ------------ */ + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */ " \n" - " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + " str r3, [r0] \n" /* Disable MPU. */ " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* configTOTAL_MPU_REGIONS == 16. */ + " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */ " \n" - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ " \n" - " ldmia r0!, {r3-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry. */ - " msr control, r3 \n" + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + " str r3, [r0] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */ - " it eq \n" - " vldmiaeq r0!, {s16-s31} \n" + /*---------- Restore Context. ---------- */ + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. 
*/ " \n" - " msr psp, r0 \n" - " bx r14 \n" + " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + " msr psp, r0 \n" + " stmia r0!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */ + " msr control, r3 \n" + + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r0!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + + " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" " \n" - " .ltorg \n"/* Assemble the current literal pool to avoid offset-out-of-bound errors with lto. */ - " .align 4 \n" - "pxCurrentTCBConst: .word pxCurrentTCB \n" + " .ltorg \n" /* Assemble the current literal pool to avoid offset-out-of-bound errors with lto. */ + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } @@ -957,11 +1406,19 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + /* Invalidate user configurable regions. */ for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } } else @@ -984,6 +1441,12 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) + + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); } lIndex = 0; @@ -1004,12 +1467,28 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | ( xRegions[ lIndex ].ulParameters ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL ); + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; + if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) || + ( ( xRegions[ lIndex ].ulParameters & 
portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION; + } + if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } } else { /* Invalidate the region. */ xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } lIndex++; @@ -1018,6 +1497,47 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } /*-----------------------------------------------------------*/ +BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + +{ + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + + return xAccessGranted; +} +/*-----------------------------------------------------------*/ + #if ( configASSERT_DEFINED == 1 ) void vPortValidateInterruptPriority( void ) @@ -1077,4 +1597,4 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } #endif /* configASSERT_DEFINED */ -/*-----------------------------------------------------------*/ \ No newline at end of file +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index df23d95381f..5417feaefd2 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -193,9 +193,45 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; +typedef struct MPU_REGION_SETTINGS +{ + uint32_t ulRegionStartAddress; + uint32_t ulRegionEndAddress; + uint32_t ulRegionPermissions; +} xMPU_REGION_SETTINGS; + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. 
+ #endif + + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + +#define MAX_CONTEXT_SIZE 52 + +/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ +#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) +#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + typedef struct MPU_SETTINGS { xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; + xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ]; + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif } xMPU_SETTINGS; /* Architecture specifics. */ @@ -203,13 +239,15 @@ typedef struct MPU_SETTINGS #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ -#define portSVC_START_SCHEDULER 0 -#define portSVC_YIELD 1 -#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_START_SCHEDULER 0 +#define portSVC_YIELD 1 +#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 5 /* Scheduler utilities. */ @@ -321,6 +359,16 @@ extern void vResetPrivilege( void ); #define portRESET_PRIVILEGE() vResetPrivilege() /*-----------------------------------------------------------*/ +extern BaseType_t xPortIsTaskPrivileged( void ); + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() +/*-----------------------------------------------------------*/ + portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( 
portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " 
MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( 
portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop 
{r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 
) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t 
MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b 
MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * 
pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const 
pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ 
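/*
 * A C-level sketch of the dispatch that every naked wrapper in this file
 * performs, shown for MPU_xQueueGetMutexHolder above. The CMSIS-style
 * __get_CONTROL() intrinsic is assumed here purely for illustration; the
 * real wrappers read the CONTROL register directly in assembly, and the
 * privilege raise/drop is handled by the SVC handler added elsewhere in
 * this patch series.
 *
 *   TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore )
 *   {
 *       if( ( __get_CONTROL() & 1U ) == 0U )
 *       {
 *           // Privileged caller: branch straight to the implementation.
 *           return MPU_xQueueGetMutexHolderImpl( xSemaphore );
 *       }
 *       else
 *       {
 *           // Unprivileged caller: svc portSVC_SYSTEM_CALL_ENTER raises
 *           // privilege and switches to the task's system call stack,
 *           // MPU_xQueueGetMutexHolderImpl is then called, and
 *           // svc portSVC_SYSTEM_CALL_EXIT restores the task stack and
 *           // drops privilege before returning to the caller.
 *       }
 *   }
 */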
+/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, 
control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " 
.syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + 
BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " 
MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL 
*/ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * 
xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop 
{r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" 
+ " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM55/non_secure/port.c +++ b/portable/GCC/ARM_CM55/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. 
*/ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lie within the [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** + * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. 
+ */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. 
*/ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. */ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. 
*/ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. 
*/ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. 
*/ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM55/non_secure/portasm.c b/portable/GCC/ARM_CM55/non_secure/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/GCC/ARM_CM55/non_secure/portasm.c +++ b/portable/GCC/ARM_CM55/non_secure/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. 
*/ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. 
*/ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. 
*/ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. */ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. 
*/ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. 
r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. 
*/ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. 
*/ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. 
&( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index c9bad40cf98..12bb5e7c4b9 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M55" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
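All of these wrappers follow the same dispatch pattern: the caller's r0 is saved, CONTROL is read, and the nPRIV bit (bit 0) is tested. If the task is already privileged the wrapper simply tail-branches to the corresponding MPU_...Impl function; if it is unprivileged it raises svc portSVC_SYSTEM_CALL_ENTER (so the SVC handler can switch to the system call stack and raise privilege), calls the Impl function, and then raises svc portSVC_SYSTEM_CALL_EXIT before returning. The wrappers have to be naked assembly so that the argument registers reach the Impl function untouched. The C fragment below is only a sketch of that control flow for one hypothetical wrapper; the names MPU_xExampleImpl, prvSvcSystemCallEnter and prvSvcSystemCallExit, and the use of the CMSIS __get_CONTROL() intrinsic, are illustrative assumptions and not part of this patch.

    /* Sketch only - hypothetical names, equivalent logic of the naked wrappers above. */
    BaseType_t MPU_xExample( BaseType_t xArg )
    {
        BaseType_t xReturn;

        if( ( __get_CONTROL() & 0x1UL ) == 0UL )
        {
            /* nPRIV == 0: already privileged - call the implementation directly
             * (the real wrapper tail-branches with "b MPU_xExampleImpl"). */
            xReturn = MPU_xExampleImpl( xArg );
        }
        else
        {
            /* nPRIV == 1: unprivileged - bracket the call with the enter/exit SVCs.
             * The SVC handler moves execution onto the system call stack and raises
             * privilege on enter, then restores the task stack and drops privilege
             * on exit. */
            prvSvcSystemCallEnter();            /* svc portSVC_SYSTEM_CALL_ENTER */
            xReturn = MPU_xExampleImpl( xArg );
            prvSvcSystemCallExit();             /* svc portSVC_SYSTEM_CALL_EXIT */
        }

        return xReturn;
    }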
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
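Note that the wrappers whose target functions take five parameters (MPU_xTaskGenericNotify, MPU_xTaskGenericNotifyWait, MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits above) raise portSVC_SYSTEM_CALL_ENTER_1 rather than portSVC_SYSTEM_CALL_ENTER: under the AAPCS only the first four arguments travel in r0-r3, so the fifth argument sits on the task stack and vSystemCallEnter_1() (added to port.c later in this patch) has to copy that extra word across to the system call stack, whereas vSystemCallEnter() copies only the exception stack frame. The switch below is a sketch of how those SVC numbers could be routed, assuming the handler has the caller's stack pointer and the EXC_RETURN value (ulLR) to hand; the authoritative dispatch is the vPortSVCHandler_C() implementation in port.c.

    /* Sketch only - illustrative routing of the system call SVC numbers. */
    switch( ucSVCNumber )
    {
        case portSVC_SYSTEM_CALL_ENTER:     /* System calls with up to 4 parameters. */
            vSystemCallEnter( pulCallerStackAddress, ulLR );
            break;

        case portSVC_SYSTEM_CALL_ENTER_1:   /* System calls with 5 parameters - the
                                             * fifth arrives on the task stack. */
            vSystemCallEnter_1( pulCallerStackAddress, ulLR );
            break;

        case portSVC_SYSTEM_CALL_EXIT:      /* Copy the frame back to the task stack
                                             * and drop privilege. */
            vSystemCallExit( pulCallerStackAddress, ulLR );
            break;
    }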
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. 
*/ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. 
r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. 
*/ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. 
*/ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. 
*/ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. 
*/ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. */ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index c9bad40cf98..12bb5e7c4b9 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M55" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. 
*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index 2be4f27704d..aab077d7639 100755 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -323,6 +323,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -359,28 +363,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM7/r0p1/portmacro.h b/portable/GCC/ARM_CM7/r0p1/portmacro.h index 82529f998ad..f433beb52d0 100644 --- a/portable/GCC/ARM_CM7/r0p1/portmacro.h +++ b/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -79,7 +79,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( 
portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( 
portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : 
"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( 
TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + 
configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " 
svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b 
MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t 
MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} 
\n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" 
+ " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " 
\n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( 
portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + 
" svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + 
TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + 
: : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM85/non_secure/port.c +++ b/portable/GCC/ARM_CM85/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. 
+ */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. 
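[Editor's note] The RBAR/RLAR extraction macros and the overflow guard introduced above can be exercised on a host to see exactly what they compute. The mask and macro definitions below are copied from the patch; the sample register values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define portMPU_RBAR_ADDRESS_MASK    ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK    ( 0xffffffe0 ) /* Must be 32-byte aligned. */

#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar )    ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar )     ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )

#define portUINT32_MAX                          ( ~( ( uint32_t ) 0 ) )
#define portADD_UINT32_WILL_OVERFLOW( a, b )    ( ( a ) > ( portUINT32_MAX - ( b ) ) )

int main( void )
{
    uint32_t ulRBAR = 0x20000000UL | 0x3UL;  /* Base address plus attribute bits (SH/AP/XN). */
    uint32_t ulRLAR = 0x200003e0UL | 0x1UL;  /* Limit address plus region enable bit. */

    printf( "Region spans 0x%08lx .. 0x%08lx\n",
            ( unsigned long ) portEXTRACT_FIRST_ADDRESS_FROM_RBAR( ulRBAR ),
            ( unsigned long ) portEXTRACT_LAST_ADDRESS_FROM_RLAR( ulRLAR ) );

    /* 0xFFFFFFF0 + 0x20 would wrap a 32-bit address, so the buffer check rejects it. */
    printf( "Overflow check: %d\n", portADD_UINT32_WILL_OVERFLOW( 0xFFFFFFF0UL, 0x20UL ) );
    return 0;
}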
+ */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. 
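[Editor's note] The portOFFSET_TO_LR/PC/PSR constants added earlier index into the basic exception frame the hardware pushes on SVC entry (R0, R1, R2, R3, R12, LR, PC, xPSR). The struct below is a host-side illustration of that layout only; it is not used by the port.

#include <stdint.h>
#include <stddef.h>

/* Basic (non-FP) exception frame as stacked by the hardware, lowest address first. */
typedef struct
{
    uint32_t ulR0, ulR1, ulR2, ulR3, ulR12, ulLR, ulPC, ulXPSR;
} ExceptionFrameSketch_t;

/* Word offsets 5, 6 and 7 correspond to portOFFSET_TO_LR, portOFFSET_TO_PC and portOFFSET_TO_PSR. */
_Static_assert( offsetof( ExceptionFrameSketch_t, ulLR ) == 5U * sizeof( uint32_t ), "LR offset" );
_Static_assert( offsetof( ExceptionFrameSketch_t, ulPC ) == 6U * sizeof( uint32_t ), "PC offset" );
_Static_assert( offsetof( ExceptionFrameSketch_t, ulXPSR ) == 7U * sizeof( uint32_t ), "xPSR offset" );

int main( void )
{
    return 0;
}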
*/ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. */ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. 
*/ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. 
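[Editor's note] Both vSystemCallEnter variants size the stack-frame copy by testing bit[4] of EXC_RETURN: when clear the hardware pushed an extended frame that includes the FP state (26 words), when set it pushed the basic 8 word frame. The host sketch below reproduces only that decision, using the mask value from the patch; the EXC_RETURN sample values are standard Cortex-M return codes used for illustration.

#include <stdint.h>
#include <stdio.h>

#define portEXC_RETURN_STACK_FRAME_TYPE_MASK    ( 1UL << 4UL )

static uint32_t prvStackFrameSizeInWords( uint32_t ulLR )
{
    /* Bit[4] clear => extended frame with FP context (26 words), set => basic frame (8 words). */
    return ( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) ? 26UL : 8UL;
}

int main( void )
{
    printf( "EXC_RETURN 0xFFFFFFED -> %lu words\n", ( unsigned long ) prvStackFrameSizeInWords( 0xFFFFFFEDUL ) );
    printf( "EXC_RETURN 0xFFFFFFFD -> %lu words\n", ( unsigned long ) prvStackFrameSizeInWords( 0xFFFFFFFDUL ) );
    return 0;
}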
+ * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. 
*/ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. 
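[Editor's note] With the v2 wrappers the task context is no longer stored on the task stack but in xMPUSettings->ulContext, written by pxPortInitialiseStack in the order shown here and in the lines that follow, and later walked backwards with ldmdb by the restore code. The enum below is only an index map of that layout, assuming configENABLE_TRUSTZONE is 1 as in this non-secure port; it is not part of the patch and the indices shift when TrustZone is disabled.

/* Illustrative index map for xMPUSettings->ulContext (configENABLE_TRUSTZONE == 1). */
typedef enum
{
    ctxR4 = 0, ctxR5, ctxR6, ctxR7, ctxR8, ctxR9, ctxR10, ctxR11, /* Software saved registers. */
    ctxR0, ctxR1, ctxR2, ctxR3, ctxR12, ctxLR, ctxPC, ctxXPSR,    /* Hardware saved frame. */
    ctxSecureContext,                                             /* xSecureContext. */
    ctxPSP, ctxPSPLIM, ctxCONTROL, ctxEXC_RETURN,                 /* Special registers and EXC_RETURN. */
    ctxWordCount                                                  /* 21 words in this configuration. */
} TaskContextIndexSketch_t;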
*/ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM85/non_secure/portasm.c b/portable/GCC/ARM_CM85/non_secure/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/GCC/ARM_CM85/non_secure/portasm.c +++ b/portable/GCC/ARM_CM85/non_secure/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. 
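[Editor's note] xPortIsAuthorizedToAccessBuffer, defined just above, is the primitive the v2 wrappers use to validate user-supplied pointers while running privileged: the whole buffer must sit inside one enabled MPU region of the calling task with the requested permission. The sketch below shows how an Impl function might guard a caller's receive buffer; the Impl function name and body are illustrative, only the access-check signature and the tskMPU_* permission bits come from the source.

#include "FreeRTOS.h"
#include "task.h"
#include "stream_buffer.h"

/* Normally provided by the port headers; signature matches the definition above. */
extern BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
                                                   uint32_t ulBufferLength,
                                                   uint32_t ulAccessRequested );

/* Illustrative sketch only: validate the unprivileged caller's buffer before
 * the kernel writes received bytes into it. */
size_t MPU_xStreamBufferReceiveImpl_Sketch( StreamBufferHandle_t xStreamBuffer,
                                            void * pvRxData,
                                            size_t xBufferLengthBytes,
                                            TickType_t xTicksToWait )
{
    size_t xReceived = 0;

    if( xPortIsAuthorizedToAccessBuffer( pvRxData,
                                         ( uint32_t ) xBufferLengthBytes,
                                         tskMPU_WRITE_PERMISSION ) == pdTRUE )
    {
        xReceived = xStreamBufferReceive( xStreamBuffer, pvRxData,
                                          xBufferLengthBytes, xTicksToWait );
    }

    return xReceived;
}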
*/ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. 
*/ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. 
*/ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. 
*/ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. 
*/ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. 
*/ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
*/ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. 
r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index c45dd21c29e..99f913d3491 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M85" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. 
*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
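The wrappers above all follow the same shape; the sketch below restates that control flow in plain C purely as a reading aid for the assembly. It is illustrative only and not part of the patch: prvReadControl(), prvRaiseSvc(), MPU_xExample() and MPU_xExampleImpl() are hypothetical names, and the real wrappers must remain naked assembly so that the caller's r0-r3 arguments reach the Impl function untouched.

/* Illustrative sketch only - mirrors the control flow of the naked wrappers
 * in mpu_wrappers_v2_asm.c.  All names below are hypothetical. */

#define EXAMPLE_SVC_SYSTEM_CALL_ENTER    4U   /* Mirrors portSVC_SYSTEM_CALL_ENTER. */
#define EXAMPLE_SVC_SYSTEM_CALL_EXIT     6U   /* Mirrors portSVC_SYSTEM_CALL_EXIT. */

static unsigned int prvReadControl( void )
{
    unsigned int ulControl;

    /* The real wrappers do this with "mrs r0, control". */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
    return ulControl;
}

static void prvRaiseSvc( unsigned int ulSvcNumber )
{
    /* Placeholder: the real wrappers encode the number as an SVC immediate
     * ("svc %0"), which transfers control to SVC_Handler. */
    ( void ) ulSvcNumber;
}

static long MPU_xExampleImpl( void * pvParam )
{
    /* Stands in for the kernel-side MPU_...Impl function. */
    ( void ) pvParam;
    return 1;
}

long MPU_xExample( void * pvParam )
{
    long xReturn;

    if( ( prvReadControl() & 1U ) == 0U )
    {
        /* CONTROL.nPRIV is 0 - the caller is already privileged, so the
         * implementation is reached directly ("b MPU_...Impl"). */
        xReturn = MPU_xExampleImpl( pvParam );
    }
    else
    {
        /* Unprivileged caller - enter the system call via SVC, run the
         * implementation, then exit via SVC so privilege is dropped again.
         * Wrappers for five-parameter APIs, such as MPU_xTaskGenericNotify
         * above, raise portSVC_SYSTEM_CALL_ENTER_1 instead. */
        prvRaiseSvc( EXAMPLE_SVC_SYSTEM_CALL_ENTER );
        xReturn = MPU_xExampleImpl( pvParam );
        prvRaiseSvc( EXAMPLE_SVC_SYSTEM_CALL_EXIT );
    }

    return xReturn;
}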
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
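Every naked MPU_* wrapper in this file encodes the same dispatch: read CONTROL, branch straight to the implementation when the caller is already privileged, otherwise bracket the call with the system-call enter/exit SVCs. The C fragment below is only an illustration of that control flow and is not part of the patch; prvReadControl() and the _Sketch function name are invented for the example, while the Impl function and the two SVC numbers are the ones used by the assembly above.

extern EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup,
                                               const EventBits_t uxBitsToSet );

static inline uint32_t prvReadControl( void ) /* Illustrative helper - wraps "mrs r0, control". */
{
    uint32_t ulControl;

    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
    return ulControl;
}

EventBits_t MPU_xEventGroupSetBits_Sketch( EventGroupHandle_t xEventGroup,
                                           const EventBits_t uxBitsToSet )
{
    EventBits_t xReturn;

    if( ( prvReadControl() & 0x1UL ) == 0UL )
    {
        /* nPRIV clear - already privileged, so take the "_Priv" path and
         * call the implementation directly. */
        xReturn = MPU_xEventGroupSetBitsImpl( xEventGroup, uxBitsToSet );
    }
    else
    {
        /* Unprivileged caller - the "_Unpriv" path: the first SVC raises
         * privilege and switches to the task's system call stack, the
         * implementation runs, and the second SVC drops privilege and
         * restores the task stack before returning to the caller. */
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
        xReturn = MPU_xEventGroupSetBitsImpl( xEventGroup, uxBitsToSet );
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
    }

    return xReturn;
}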
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
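The address-range, permission, and overflow helpers above combine into a single per-region test. The fragment below is an illustrative sketch, not part of the patch: it mirrors the per-region check that xPortIsAuthorizedToAccessBuffer() performs later in this file. The function name and parameters are invented for the example, and it assumes it lives in port.c so that prvGetRegionAccessPermissions() is visible.

static BaseType_t prvBufferWithinRegionSketch( const void * pvBuffer,
                                               uint32_t ulBufferLength,
                                               uint32_t ulAccessRequested,
                                               uint32_t ulRBAR,
                                               uint32_t ulRLAR )
{
    uint32_t ulBufferStartAddress, ulBufferEndAddress;
    BaseType_t xWithinRegion = pdFALSE;

    /* Reject lengths whose last byte would wrap past the top of the
     * 32-bit address space. */
    if( portADD_UINT32_WILL_OVERFLOW( ( uint32_t ) pvBuffer, ulBufferLength - 1UL ) == pdFALSE )
    {
        ulBufferStartAddress = ( uint32_t ) pvBuffer;
        ulBufferEndAddress = ulBufferStartAddress + ulBufferLength - 1UL;

        /* Both ends of the buffer must fall inside the region, and the
         * region's access permissions must cover the requested access. */
        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( ulRBAR ),
                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( ulRLAR ) ) &&
            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( ulRBAR ),
                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( ulRLAR ) ) &&
            portIS_AUTHORIZED( ulAccessRequested,
                               prvGetRegionAccessPermissions( ulRBAR ) ) )
        {
            xWithinRegion = pdTRUE;
        }
    }

    return xWithinRegion;
}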
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
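As a concrete illustration of the configMAX_SYSCALL_INTERRUPT_PRIORITY assert added above (this note is not part of the patch text): on a core with three implemented priority bits, ucMaxPriorityValue reads back as 0xE0, so the unimplemented bits are 0x1F. A configMAX_SYSCALL_INTERRUPT_PRIORITY of 0xA0 (priority 5 shifted into the implemented top bits) gives ( 0xA0 & 0x1F ) == 0 and passes, whereas an unshifted value such as 0x05 sets bits the hardware does not implement and now trips the assert at start-up instead of silently acting as the highest interrupt priority at run time.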
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. 
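xPortIsAuthorizedToAccessBuffer(), added above, is the hook used to validate task-supplied pointers before the kernel touches them. The fragment below shows one illustrative use only and is not part of the patch; the helper name and buffers are invented, and it assumes the usual FreeRTOS and <string.h> headers are available.

#include <string.h>

/* Hypothetical helper: copy from a task-supplied buffer only if the calling
 * task's MPU regions actually grant it read access to every byte. */
static BaseType_t prvCopyFromTaskBufferSketch( void * pvKernelDestination,
                                               const void * pvTaskSource,
                                               uint32_t ulLength )
{
    BaseType_t xAccessGranted;

    xAccessGranted = xPortIsAuthorizedToAccessBuffer( pvTaskSource,
                                                      ulLength,
                                                      tskMPU_READ_PERMISSION );

    if( xAccessGranted == pdTRUE )
    {
        ( void ) memcpy( pvKernelDestination, pvTaskSource, ulLength );
    }

    return xAccessGranted;
}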
*/ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. 
r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. 
*/ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. 
*/ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. 
*/ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. 
*/ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. */ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index c45dd21c29e..99f913d3491 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M85" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. 
*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/AVR32_UC3/portmacro.h b/portable/GCC/AVR32_UC3/portmacro.h index 5bc7c8b0877..ebf5010919d 100644 --- a/portable/GCC/AVR32_UC3/portmacro.h +++ b/portable/GCC/AVR32_UC3/portmacro.h @@ -116,7 +116,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/CORTUS_APS3/portmacro.h b/portable/GCC/CORTUS_APS3/portmacro.h index 37e739bb55f..9d2d0e0cdb5 100644 --- a/portable/GCC/CORTUS_APS3/portmacro.h +++ b/portable/GCC/CORTUS_APS3/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/ColdFire_V2/portmacro.h b/portable/GCC/ColdFire_V2/portmacro.h index 14d318b7b94..06ddbd88764 100644 --- a/portable/GCC/ColdFire_V2/portmacro.h +++ b/portable/GCC/ColdFire_V2/portmacro.h @@ -63,7 +63,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/H8S2329/portmacro.h b/portable/GCC/H8S2329/portmacro.h index 829ff9f9b64..a5612cadfb8 100644 --- a/portable/GCC/H8S2329/portmacro.h +++ b/portable/GCC/H8S2329/portmacro.h @@ -64,7 +64,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif diff --git a/portable/GCC/HCS12/portmacro.h b/portable/GCC/HCS12/portmacro.h index a864419899f..a11b96c37d0 100644 --- a/portable/GCC/HCS12/portmacro.h +++ b/portable/GCC/HCS12/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/MSP430F449/portmacro.h b/portable/GCC/MSP430F449/portmacro.h index 5a6e1db9c94..2f70a24d1b2 100644 --- a/portable/GCC/MSP430F449/portmacro.h +++ b/portable/GCC/MSP430F449/portmacro.h @@ -63,7 +63,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/PPC405_Xilinx/portmacro.h b/portable/GCC/PPC405_Xilinx/portmacro.h index e6f1c06c845..06cca5df9ed 100644 --- a/portable/GCC/PPC405_Xilinx/portmacro.h +++ b/portable/GCC/PPC405_Xilinx/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/PPC440_Xilinx/portmacro.h b/portable/GCC/PPC440_Xilinx/portmacro.h index e6f1c06c845..06cca5df9ed 100644 --- a/portable/GCC/PPC440_Xilinx/portmacro.h +++ b/portable/GCC/PPC440_Xilinx/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/RL78/portmacro.h b/portable/GCC/RL78/portmacro.h index 8108f1c1221..34940200838 100644 --- a/portable/GCC/RL78/portmacro.h +++ b/portable/GCC/RL78/portmacro.h @@ -59,7 +59,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/STR75x/portmacro.h b/portable/GCC/STR75x/portmacro.h index 8d5e832c59e..876783dcb67 100644 --- a/portable/GCC/STR75x/portmacro.h +++ b/portable/GCC/STR75x/portmacro.h @@ -64,7 +64,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif diff --git a/portable/IAR/78K0R/portmacro.h b/portable/IAR/78K0R/portmacro.h index 4ccc083c080..03bb5dee7ef 100644 --- a/portable/IAR/78K0R/portmacro.h +++ b/portable/IAR/78K0R/portmacro.h @@ -64,7 +64,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..867642b5e97 --- /dev/null +++ b/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1623 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0, r1} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0, r1} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0, r1} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0, r1} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0, r1} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0, r1} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0, r1} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
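
Every MPU_* entry point in this file uses the same gate: preserve r0/r1 (the argument registers), read CONTROL, test bit 0 (nPRIV), then either fall through to the privileged MPU_*Impl function or raise the system-call SVC pair. The following is a minimal C sketch of that privilege test only, assuming GCC-style inline assembly and hypothetical helper names (prvReadControl, prvCallerIsPrivileged); the shipped wrappers stay in assembly so the argument registers reach the Impl functions unmodified.

    #include <stdint.h>

    /* Hypothetical helper, for illustration only - the real wrappers are the
     * assembly routines in this file. Mirrors "mrs r0, control". */
    static inline uint32_t prvReadControl( void )
    {
        uint32_t ulControl;

        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        return ulControl;
    }

    /* CONTROL bit 0 (nPRIV) is 0 in privileged thread mode, 1 otherwise -
     * the same test as "movs r1, #1; tst r0, r1" above. */
    static inline uint32_t prvCallerIsPrivileged( void )
    {
        return ( ( prvReadControl() & 1UL ) == 0UL ) ? 1UL : 0UL;
    }
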
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0, r1} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0, r1} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0, r1} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0, r1} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0, r1} + b 
MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0, r1} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne 
MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0, r1} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0, r1} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0, r1} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0, r1} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0, r1} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0, r1} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0, r1} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0, r1} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + 
bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0, r1} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0, r1} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0, r1} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0, r1} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0, r1} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0, r1} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
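
When one of the unprivileged paths above executes svc #portSVC_SYSTEM_CALL_ENTER (or portSVC_SYSTEM_CALL_ENTER_1 for the five-parameter calls such as MPU_xTaskGenericNotify), the SVC handler added earlier in this patch recovers the call number from the caller's exception frame before branching to vSystemCallEnter, vSystemCallEnter_1 or vSystemCallExit. Below is a C rendering of that extraction for clarity, using a hypothetical helper name; the handler itself must remain naked assembly.

    #include <stdint.h>

    /* SVC numbers added to portmacrocommon.h by this patch. */
    #define portSVC_SYSTEM_CALL_ENTER      4    /* System calls with up to 4 parameters. */
    #define portSVC_SYSTEM_CALL_ENTER_1    5    /* System calls with 5 parameters. */
    #define portSVC_SYSTEM_CALL_EXIT       6

    /* Hypothetical helper mirroring "ldr r1, [r0, #24]" followed by
     * "ldrb r2, [r1, #-2]" in the SVC handler. */
    static uint8_t prvExtractSVCNumber( const uint32_t * pulExceptionFrame )
    {
        /* Word 6 of the stacked frame (offset 24) is the return address; the
         * SVC number is the low byte of the 16-bit SVC instruction that
         * immediately precedes it. */
        const uint8_t * pucReturnAddress = ( const uint8_t * ) pulExceptionFrame[ 6 ];

        return pucReturnAddress[ -2 ];
    }
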
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0, r1} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0, r1} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0, r1} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0, r1} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0, r1} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0, r1} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + movs r1, #1 + tst r0, r1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0, r1} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0, r1} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0, r1} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0, r1} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0, r1} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0, r1} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
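Note that MPU_xEventGroupWaitBits (like MPU_xTimerGenericCommand above, which also wraps a five-parameter API) traps with portSVC_SYSTEM_CALL_ENTER_1 rather than portSVC_SYSTEM_CALL_ENTER: under the AAPCS only the first four arguments travel in r0-r3 and therefore appear in the hardware-saved exception frame, while the fifth is pushed onto the caller's stack, so vSystemCallEnter_1() must copy one extra word across to the system call stack. A short usage sketch from the caller's side (prvWaitForSyncBits and the bit mask are illustrative, not part of the port):

#include "FreeRTOS.h"
#include "event_groups.h"

/* Illustrative caller: the first four arguments are register arguments,
 * the fifth ( xTicksToWait ) is passed on the caller's stack. */
static EventBits_t prvWaitForSyncBits( EventGroupHandle_t xEventGroup )
{
    return xEventGroupWaitBits( xEventGroup,     /* r0. */
                                0x03UL,          /* r1 - bits to wait for. */
                                pdTRUE,          /* r2 - clear on exit. */
                                pdFALSE,         /* r3 - wait for any bit. */
                                portMAX_DELAY ); /* Fifth argument - passed on the stack. */
}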
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0, r1} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0, r1} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0, r1} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0, r1} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0, r1} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0, r1} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0, r1} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0, r1} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0, r1} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM23/non_secure/port.c b/portable/IAR/ARM_CM23/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM23/non_secure/port.c +++ b/portable/IAR/ARM_CM23/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM23/non_secure/portasm.s b/portable/IAR/ARM_CM23/non_secure/portasm.s index fffed8df619..648ae005010 100644 --- a/portable/IAR/ARM_CM23/non_secure/portasm.s +++ b/portable/IAR/ARM_CM23/non_secure/portasm.s @@ -33,12 +33,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -98,65 +107,99 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. 
Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + subs r2, #20 + ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + subs r2, #20 + msr psp, r3 + msr psplim, r4 + msr control, r5 + mov lr, r6 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + subs r2, #32 + ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r2, #48 + ldmia r2!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r2, #32 + ldmia r2!, {r4-r7} /* Restore r4-r7. */ + subs r2, #16 + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. 
*/ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - movs r5, #4 /* r5 = 4. */ - str r5, [r2] /* Program RNR = 4. */ - ldmia r3!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write first set of RBAR/RLAR registers. */ - movs r5, #5 /* r5 = 5. */ - str r5, [r2] /* Program RNR = 5. */ - ldmia r3!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write second set of RBAR/RLAR registers. */ - movs r5, #6 /* r5 = 6. */ - str r5, [r2] /* Program RNR = 6. */ - ldmia r3!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write third set of RBAR/RLAR registers. */ - movs r5, #7 /* r5 = 7. */ - str r5, [r2] /* Program RNR = 7. */ - ldmia r3!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -167,6 +210,7 @@ vRestoreContextOfFirstTask: msr psp, r0 /* This is now the new top of stack to use in the task. */ isb bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -199,6 +243,149 @@ vClearInterruptMask: msr PRIMASK, r0 bx lr /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). 
*/ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r3} /* LR is now in r3. */ + mov lr, r3 /* Restore LR. */ + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + stmia r2!, {r4-r7} /* Store r4-r7. */ + mov r4, r8 /* r4 = r8. */ + mov r5, r9 /* r5 = r9. */ + mov r6, r10 /* r6 = r10. */ + mov r7, r11 /* r7 = r11. */ + stmia r2!, {r4-r7} /* Store r8-r11. */ + ldmia r3!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */ + stmia r2!, {r4-r7} /* Store the hardware saved context. */ + ldmia r3!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */ + stmia r2!, {r4-r7} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + mov r6, lr /* r6 = LR. */ + stmia r2!, {r0, r3-r6} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + cpsid i + bl vTaskSwitchContext + cpsie i + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 7. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. 
*/ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + subs r2, #20 + ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + subs r2, #20 + msr psp, r3 + msr psplim, r4 + msr control, r5 + mov lr, r6 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r4} /* LR is now in r4. */ + mov lr, r4 + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + subs r2, #32 + ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */ + ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */ + subs r2, #48 + ldmia r2!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r2, #32 + ldmia r2!, {r4-r7} /* Restore r4-r7. */ + subs r2, #16 + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ @@ -216,41 +403,18 @@ PendSV_Handler: bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ + subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. 
*/ -#endif /* configENABLE_MPU */ + b select_next_task save_ns_context: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stmia r2!, {r4-r7} /* Store the low registers that are not saved automatically. */ - mov r4, r8 /* r4 = r8. */ - mov r5, r9 /* r5 = r9. */ - mov r6, r10 /* r6 = r10. */ - mov r7, r11 /* r7 = r11. */ - stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #48 /* r2 = r2 - 48. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ @@ -261,7 +425,6 @@ PendSV_Handler: mov r6, r10 /* r6 = r10. */ mov r7, r11 /* r7 = r11. */ stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ select_next_task: cpsid i @@ -272,68 +435,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r4, =0xe000ed98 /* r4 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r5, #4 /* r5 = 4. */ - str r5, [r4] /* Program RNR = 4. */ - ldmia r1!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write first set of RBAR/RLAR registers. */ - movs r5, #5 /* r5 = 5. */ - str r5, [r4] /* Program RNR = 5. */ - ldmia r1!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write second set of RBAR/RLAR registers. */ - movs r5, #6 /* r5 = 6. */ - str r5, [r4] /* Program RNR = 6. */ - ldmia r1!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write third set of RBAR/RLAR registers. */ - movs r5, #7 /* r5 = 7. */ - str r5, [r4] /* Program RNR = 7. */ - ldmia r1!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - orrs r4, r5 /* r4 = r4 | r5 i.e. 
Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -350,7 +451,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: adds r2, r2, #16 /* Move to the high registers. */ @@ -363,8 +463,45 @@ PendSV_Handler: subs r2, r2, #32 /* Go back to the low registers. */ ldmia r2!, {r4-r7} /* Restore the low registers that are not automatically restored. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + movs r0, #4 + mov r1, lr + tst r0, r1 + beq stack_on_msp + stack_on_psp: + mrs r0, psp + b route_svc + stack_on_msp: + mrs r0, msp + b route_svc + + route_svc: + ldr r2, [r0, #24] + subs r2, #2 + ldrb r3, [r2, #0] + cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq system_call_enter + cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq system_call_enter_1 + cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. 
*/ + beq system_call_exit + b vPortSVCHandler_C + + system_call_enter: + b vSystemCallEnter + system_call_enter_1: + b vSystemCallEnter_1 + system_call_exit: + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: movs r0, #4 mov r1, lr @@ -375,6 +512,8 @@ SVC_Handler: stacking_used_msp: mrs r0, msp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..867642b5e97 --- /dev/null +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1623 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0, r1} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0, r1} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0, r1} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0, r1} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0, r1} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0, r1} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0, r1} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
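Every MPU_* wrapper added above follows the same dispatch pattern: read CONTROL, test the nPRIV bit (bit 0), branch straight to the kernel implementation when the caller is already privileged, and otherwise bracket the call between svc #portSVC_SYSTEM_CALL_ENTER and svc #portSVC_SYSTEM_CALL_EXIT so the SVC handler can raise privilege and switch to the task's system call stack around the implementation. The sketch below is a rough C equivalent of one wrapper; it assumes GCC-style inline assembly (the port itself targets IAR) and a hypothetical MPU_vTaskSuspendImpl prototype, so it is illustrative only, not the port's actual implementation.

    /* Illustrative sketch only - C equivalent of the assembly dispatch used by
     * the MPU_* wrappers. The Impl prototype and parameter type are assumptions. */
    #include <stdint.h>

    extern void MPU_vTaskSuspendImpl( void * xTaskToSuspend ); /* Hypothetical prototype. */

    void MPU_vTaskSuspend_Sketch( void * xTaskToSuspend )
    {
        uint32_t ulControl;

        /* Mirrors "mrs r0, control; movs r1, #1; tst r0, r1" in the wrappers. */
        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        if( ( ulControl & 0x1UL ) == 0UL )
        {
            /* CONTROL.nPRIV == 0 - the caller is already privileged, so call
             * the implementation directly (the *_Priv path). */
            MPU_vTaskSuspendImpl( xTaskToSuspend );
        }
        else
        {
            /* Unprivileged caller (the *_Unpriv path) - the first SVC raises
             * privilege and switches to the task's system call stack, the
             * implementation runs, and the second SVC undoes both. */
            __asm volatile ( "svc #4" ::: "memory" ); /* portSVC_SYSTEM_CALL_ENTER. */
            MPU_vTaskSuspendImpl( xTaskToSuspend );
            __asm volatile ( "svc #6" ::: "memory" ); /* portSVC_SYSTEM_CALL_EXIT. */
        }
    }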
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0, r1} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0, r1} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0, r1} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0, r1} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0, r1} + b 
MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0, r1} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne 
MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0, r1} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0, r1} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0, r1} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0, r1} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0, r1} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0, r1} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0, r1} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0, r1} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + 
bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0, r1} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0, r1} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0, r1} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0, r1} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0, r1} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0, r1} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
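The svc #portSVC_SYSTEM_CALL_ENTER instructions issued by these wrappers are decoded by the SVC_Handler shown earlier: it selects PSP or MSP from bit 2 of EXC_RETURN, reads the stacked PC out of the exception frame, steps back over the 2-byte SVC instruction to read its immediate, then routes numbers 4, 5 and 6 to vSystemCallEnter, vSystemCallEnter_1 and vSystemCallExit and everything else to vPortSVCHandler_C. A small hedged C sketch of that extraction follows; pulCallerStack and the function name are illustrative, not part of the port.

    /* Illustrative sketch only - how the SVC number is recovered from the
     * caller's exception frame, mirroring "ldr r2, [r0, #24]; subs r2, #2;
     * ldrb r3, [r2]" in route_svc. */
    #include <stdint.h>

    uint8_t ucGetSVCNumber_Sketch( const uint32_t * pulCallerStack )
    {
        /* The stacked PC is the 7th word of the hardware-saved frame (byte
         * offset 24) and points at the instruction after the SVC. The Thumb
         * SVC encoding is 2 bytes with the immediate in the low byte, so step
         * back 2 bytes and read that byte. */
        const uint8_t * pucSVCInstruction =
            ( const uint8_t * ) ( uintptr_t ) ( pulCallerStack[ 6 ] - 2UL );

        return pucSVCInstruction[ 0 ];
    }

portSVC_SYSTEM_CALL_ENTER covers the wrappers whose arguments all fit in r0-r3; portSVC_SYSTEM_CALL_ENTER_1 is used by the five-parameter calls, where the fifth argument travels on the caller's stack and so has to be carried across to the system call stack as well.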
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0, r1} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0, r1} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0, r1} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0, r1} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0, r1} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0, r1} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + movs r1, #1 + tst r0, r1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0, r1} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0, r1} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0, r1} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0, r1} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0, r1} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0, r1} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
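MPU_xTimerGenericCommand above is the one wrapper that can also be reached from an interrupt, so before the usual CONTROL test it reads IPSR: a non-zero IPSR means the caller is already in handler mode, which always executes privileged, so the wrapper takes the direct path instead of raising the system call SVC pair. A hedged C sketch of that pair of checks, again assuming GCC-style inline assembly and illustrative names, follows.

    /* Illustrative sketch only - the two checks MPU_xTimerGenericCommand
     * performs: "mrs r0, ipsr" (in an ISR?) and "mrs r0, control" (privileged?). */
    #include <stdint.h>

    static inline uint32_t ulIsInsideInterrupt_Sketch( void )
    {
        uint32_t ulIPSR;

        /* IPSR holds the active exception number and reads as zero in thread mode. */
        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) );

        return ( ulIPSR != 0UL ) ? 1UL : 0UL;
    }

    static inline uint32_t ulIsPrivileged_Sketch( void )
    {
        uint32_t ulControl;

        /* CONTROL.nPRIV (bit 0) is zero when thread mode is privileged. */
        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        return ( ( ulControl & 0x1UL ) == 0UL ) ? 1UL : 0UL;
    }

    uint32_t ulMustUseSystemCallPath_Sketch( void )
    {
        /* Only an unprivileged, thread-mode caller goes through the
         * ENTER_1/EXIT SVC pair; ISR and privileged callers branch straight
         * to MPU_xTimerGenericCommandImpl. */
        return ( ( ulIsInsideInterrupt_Sketch() == 0UL ) &&
                 ( ulIsPrivileged_Sketch() == 0UL ) ) ? 1UL : 0UL;
    }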
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0, r1} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0, r1} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0, r1} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0, r1} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0, r1} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0, r1} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0, r1} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0, r1} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0, r1} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s index 62bd3872284..8f77c4dafb1 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -88,63 +97,97 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ subs r1, #16
+ ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ subs r1, #16
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+ mov lr, r5
+
+ restore_general_regs_first_task:
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain the other half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy the other half of the hardware saved context on the task stack. */
+ subs r1, #48
+ ldmia r1!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* Restore r4-r7. */
+ subs r1, #16
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
 vRestoreContextOfFirstTask:
 ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
 ldr r1, [r2] /* Read pxCurrentTCB. */
 ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */

-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU.
*/ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */ - movs r4, #5 /* r4 = 5. */ - str r4, [r2] /* Program RNR = 5. */ - ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */ - movs r4, #6 /* r4 = 6. */ - str r4, [r2] /* Program RNR = 6. */ - ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */ - movs r4, #7 /* r4 = 7. */ - str r4, [r2] /* Program RNR = 7. */ - ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -153,6 +196,7 @@ vRestoreContextOfFirstTask: msr psp, r0 /* This is now the new top of stack to use in the task. */ isb bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -187,23 +231,127 @@ vClearInterruptMask: bx lr /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + stmia r1!, {r4-r7} /* Store r4-r7. */ + mov r4, r8 /* r4 = r8. */ + mov r5, r9 /* r5 = r9. */ + mov r6, r10 /* r6 = r10. */ + mov r7, r11 /* r7 = r11. */ + stmia r1!, {r4-r7} /* Store r8-r11. */ + ldmia r2!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */ + stmia r1!, {r4-r7} /* Store the hardware saved context. */ + ldmia r2!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */ + stmia r1!, {r4-r7} /* Store the hardware saved context. */ + + save_special_regs: + mrs r2, psp /* r2 = PSP. 
*/
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ mov r5, lr /* r5 = LR. */
+ stmia r1!, {r2-r5} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ subs r1, #16
+ ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ subs r1, #16
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+ mov lr, r5
+
+ restore_general_regs:
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain the other half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy the other half of the hardware saved context on the task stack. */
+ subs r1, #48
+ ldmia r1!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7.
*/ + subs r1, #32 + ldmia r1!, {r4-r7} /* Restore r4-r7. */ + subs r1, #16 + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r0, r0, #44 /* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r0, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmia r0!, {r1-r7} /* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */ - mov r4, r8 /* r4 = r8. */ - mov r5, r9 /* r5 = r9. */ - mov r6, r10 /* r6 = r10. */ - mov r7, r11 /* r7 = r11. */ - stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */ -#else /* configENABLE_MPU */ + subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */ str r0, [r1] /* Save the new top of stack in TCB. */ mrs r2, psplim /* r2 = PSPLIM. */ @@ -214,7 +362,6 @@ PendSV_Handler: mov r6, r10 /* r6 = r10. */ mov r7, r11 /* r7 = r11. */ stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */ -#endif /* configENABLE_MPU */ cpsid i bl vTaskSwitchContext @@ -224,63 +371,6 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */ - movs r4, #5 /* r4 = 5. */ - str r4, [r2] /* Program RNR = 5. */ - ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */ - movs r4, #6 /* r4 = 6. */ - str r4, [r2] /* Program RNR = 6. */ - ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */ - movs r4, #7 /* r4 = 7. */ - str r4, [r2] /* Program RNR = 7. */ - ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. 
*/ - movs r4, #1 /* r4 = 1. */ - orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - adds r0, r0, #28 /* Move to the high registers. */ - ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */ - mov r8, r4 /* r8 = r4. */ - mov r9, r5 /* r9 = r5. */ - mov r10, r6 /* r10 = r6. */ - mov r11, r7 /* r11 = r7. */ - msr psp, r0 /* Remember the new top of stack for the task. */ - subs r0, r0, #44 /* Move to the starting of the saved context. */ - ldmia r0!, {r1-r7} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ - bx r3 -#else /* configENABLE_MPU */ adds r0, r0, #24 /* Move to the high registers. */ ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */ mov r8, r4 /* r8 = r4. */ @@ -292,9 +382,45 @@ PendSV_Handler: ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ bx r3 + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + movs r0, #4 + mov r1, lr + tst r0, r1 + beq stack_on_msp + stack_on_psp: + mrs r0, psp + b route_svc + stack_on_msp: + mrs r0, msp + b route_svc + + route_svc: + ldr r2, [r0, #24] + subs r2, #2 + ldrb r3, [r2, #0] + cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq system_call_enter + cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq system_call_enter_1 + cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq system_call_exit + b vPortSVCHandler_C + + system_call_enter: + b vSystemCallEnter + system_call_enter_1: + b vSystemCallEnter_1 + system_call_exit: + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: movs r0, #4 mov r1, lr @@ -305,6 +431,8 @@ SVC_Handler: stacking_used_msp: mrs r0, msp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. 
+ */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c index d54c3aceb4a..1e3a3ded0df 100755 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -241,6 +241,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -277,28 +281,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + 
MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
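Editor's note: every MPU_* entry point in this assembly file repeats the same pattern — read CONTROL, test the nPRIV bit, branch straight to the kernel implementation when the caller is already privileged, and otherwise go through the SVC-based system call path. The C sketch below restates that control flow for a single hypothetical call; MPU_xExampleCall, MPU_xExampleCallImpl and prvRunViaSystemCall are illustrative names only and are not part of the port.

    /* Illustrative sketch only - not port code.  Shows the privilege check
     * performed in assembly by each MPU_* wrapper above. */
    #include <stdint.h>

    extern long MPU_xExampleCallImpl( void * pvParam );            /* Kernel-side implementation (hypothetical name). */
    extern long prvRunViaSystemCall( long ( * pfnImpl )( void * ),
                                     void * pvParam );             /* Stand-in for the SVC enter/exit sequence (hypothetical name). */

    static inline uint32_t prvReadControl( void )
    {
        uint32_t ulControl;

        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
        return ulControl;
    }

    long MPU_xExampleCall( void * pvParam )
    {
        if( ( prvReadControl() & 1UL ) == 0UL )
        {
            /* nPRIV is 0 - the caller is already privileged, so call the
             * implementation directly. */
            return MPU_xExampleCallImpl( pvParam );
        }

        /* nPRIV is 1 - the caller is unprivileged.  The real wrapper raises
         * SVC portSVC_SYSTEM_CALL_ENTER, runs the implementation on the
         * per-task system call stack, and drops privilege again via
         * SVC portSVC_SYSTEM_CALL_EXIT. */
        return prvRunViaSystemCall( MPU_xExampleCallImpl, pvParam );
    }

In the assembly, the same test is the tst r0, #1 / bne sequence; the push {r0} / pop {r0} around it preserves the first argument of the wrapped API call.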
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc 
#portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop 
{r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
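Editor's note: most of these wrappers enter the system call with portSVC_SYSTEM_CALL_ENTER, while the APIs that take a fifth parameter (for example MPU_xTaskGenericNotify, MPU_xTaskGenericNotifyWait, MPU_xEventGroupWaitBits and MPU_xTimerGenericCommand) use portSVC_SYSTEM_CALL_ENTER_1. Under the AAPCS only the first four arguments travel in r0-r3; the fifth sits on the task stack just above the exception frame, so the ENTER_1 path must copy one extra word across to the system call stack. Below is a simplified sketch of that copy; it omits the xPSR bit-9 stack-aligner handling done by the real handler, and prvCopyFrameAndStackedArg is an illustrative name only.

    /* Illustrative sketch only - not port code.  Copies a stacked exception
     * frame plus the one stacked (fifth) argument onto the system call stack,
     * reserving two words so the stack stays 8-byte aligned. */
    #include <stdint.h>

    static uint32_t * prvCopyFrameAndStackedArg( uint32_t * pulSystemCallStack,
                                                 const uint32_t * pulTaskStack,
                                                 uint32_t ulFrameWords ) /* 8, or 26 with an extended FPU frame. */
    {
        uint32_t i;

        /* Frame plus the stacked argument, rounded up to an even word count. */
        pulSystemCallStack -= ( ulFrameWords + 2U );

        for( i = 0; i < ulFrameWords; i++ )
        {
            pulSystemCallStack[ i ] = pulTaskStack[ i ];
        }

        /* The fifth argument lives immediately above the exception frame on
         * the task stack. */
        pulSystemCallStack[ ulFrameWords ] = pulTaskStack[ ulFrameWords ];

        return pulSystemCallStack;
    }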
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + 
MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + 
pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM33/non_secure/port.c b/portable/IAR/ARM_CM33/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM33/non_secure/port.c +++ b/portable/IAR/ARM_CM33/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
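A non-NULL value here would mean that a system call is already in progress, i.e. an attempted nested system call, which is not supported.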
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
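Clearing the nPRIV bit switches thread mode to privileged execution for the duration of the system call; the privilege is dropped again in vSystemCallExit.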
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
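Setting the nPRIV bit returns thread mode to unprivileged execution before the task resumes.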
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
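The stack grows downwards, so the pointer is placed at the last word of the statically allocated buffer and then rounded down to a portBYTE_ALIGNMENT (8 byte) boundary; the corresponding stack limit is rounded up from the start of the buffer.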
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
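PRIGROUP occupies bits [10:8] of AIRCR, hence the shift by portPRIGROUP_SHIFT before the value is used.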
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM33/non_secure/portasm.s b/portable/IAR/ARM_CM33/non_secure/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/IAR/ARM_CM33/non_secure/portasm.s +++ b/portable/IAR/ARM_CM33/non_secure/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. 
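Bit 0 of MPU_CTRL is the ENABLE bit, so clearing it disables the MPU while the task's regions are reprogrammed.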
*/ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. 
*/ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. 
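Both the callee saved registers (r4-r11) and a copy of the hardware stacked frame are stored in the task's context area in the TCB instead of on the task stack.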
*/ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. 
*/ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. 
*/ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
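When configENABLE_MPU is 1 the complete task context is saved in the ulContext member of this structure rather than on the task stack; MAX_CONTEXT_SIZE below is sized according to the FPU/MVE and TrustZone configuration.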
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
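The fifth parameter is passed on the task stack, so vSystemCallEnter_1 also copies it across to the system call stack.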
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
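SVC_Handler uses these numbers to decide whether to branch to vSystemCallEnter, vSystemCallEnter_1 or vSystemCallExit.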
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
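Every MPU_* wrapper in this file follows the same trampoline shape: read CONTROL, test the nPRIV bit (bit 0), and either branch straight to the corresponding ...Impl function when the caller is already privileged, or go through the svc #portSVC_SYSTEM_CALL_ENTER / svc #portSVC_SYSTEM_CALL_EXIT pair when it is not. The C sketch below is purely illustrative and is not part of the patch; prvSystemCallEnter(), prvSystemCallExit() and xExampleImpl() are hypothetical stand-ins for the SVC handling that vSystemCallEnter()/vSystemCallExit() perform in port.c.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the SVC-driven entry/exit that the real wrappers
 * trigger with svc #portSVC_SYSTEM_CALL_ENTER and svc #portSVC_SYSTEM_CALL_EXIT. */
static void prvSystemCallEnter( void ) { printf( "switch to system call stack, raise privilege\n" ); }
static void prvSystemCallExit( void )  { printf( "restore task stack, drop privilege\n" ); }
static long xExampleImpl( void )       { return 1; } /* Stands in for an MPU_...Impl function. */

/* C-level equivalent of one trampoline. Bit 0 of CONTROL (nPRIV) is 1 when the
 * caller is unprivileged - exactly what the tst r0, #1 / bne pair tests above. */
static long lExampleWrapper( uint32_t ulControl )
{
    long lResult;

    if( ( ulControl & 1UL ) == 0UL )
    {
        lResult = xExampleImpl(); /* Privileged caller: call the implementation directly. */
    }
    else
    {
        prvSystemCallEnter();     /* Unprivileged caller: enter the system call path. */
        lResult = xExampleImpl();
        prvSystemCallExit();      /* Return to the task stack and drop privilege again. */
    }

    return lResult;
}

int main( void )
{
    printf( "%ld\n", lExampleWrapper( 1UL ) ); /* Exercise the unprivileged path. */
    return 0;
}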
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
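/* Note on the two entry SVC numbers used in this file: wrappers whose
 * parameters all fit in r0-r3 use portSVC_SYSTEM_CALL_ENTER, while wrappers
 * such as MPU_xTaskGenericNotify, MPU_xTaskGenericNotifyWait,
 * MPU_xEventGroupWaitBits and MPU_xTimerGenericCommand use
 * portSVC_SYSTEM_CALL_ENTER_1 because a fifth parameter is passed on the
 * stack - vSystemCallEnter_1() in port.c copies that stacked parameter onto
 * the system call stack in addition to the exception frame. The
 * MPU_xTimerGenericCommand wrapper below additionally checks IPSR so that
 * calls made from an ISR always take the privileged path. */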
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
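The PUBWEAK ...Impl symbols in this block are self-branching stubs: they exist only so the link succeeds when a particular API is excluded by the FreeRTOS config options, and they are overridden whenever mpu_wrappers provides a real (strong) definition. The single-file C sketch below is an analogy only, using the GCC/Clang weak-symbol attribute in place of IAR's PUBWEAK; the symbol name is reused purely for illustration.

#include <stdio.h>

/* Weak default, analogous to the PUBWEAK MPU_...Impl stubs above: it is only
 * used if no other object file supplies a strong definition of the symbol. */
__attribute__(( weak )) int MPU_xQueueGenericSendImpl( void )
{
    return 0; /* The assembly stubs above spin in place ( b <self> ) instead of returning. */
}

int main( void )
{
    /* Resolves to the weak default unless a strong MPU_xQueueGenericSendImpl
     * is linked in from another translation unit. */
    printf( "%d\n", MPU_xQueueGenericSendImpl() );
    return 0;
}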
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
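The region helper macros added above (portEXTRACT_FIRST_ADDRESS_FROM_RBAR and friends) are plain bit arithmetic, so a host-side sketch can show what they compute. The macro bodies below are copied from this patch; the sample RBAR/RLAR values are made up for illustration only.

#include <stdint.h>
#include <stdio.h>

#define portMPU_RBAR_ADDRESS_MASK    ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK    ( 0xffffffe0 ) /* Must be 32-byte aligned. */

#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar )     ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar )      ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
#define portUINT32_MAX               ( ~( ( uint32_t ) 0 ) )
#define portADD_UINT32_WILL_OVERFLOW( a, b )            ( ( a ) > ( portUINT32_MAX - ( b ) ) )

int main( void )
{
    /* Example RBAR/RLAR register values - illustrative only. */
    uint32_t ulRBAR = 0x20000007UL; /* Base 0x20000000 plus attribute bits in [4:0]. */
    uint32_t ulRLAR = 0x200003E1UL; /* Limit 0x200003E0 plus the region enable bit. */

    uint32_t ulFirst = portEXTRACT_FIRST_ADDRESS_FROM_RBAR( ulRBAR ); /* 0x20000000 */
    uint32_t ulLast  = portEXTRACT_LAST_ADDRESS_FROM_RLAR( ulRLAR );  /* 0x200003FF */

    printf( "region [0x%08lx, 0x%08lx]\n", ( unsigned long ) ulFirst, ( unsigned long ) ulLast );
    printf( "0x20000100 in range: %d\n", portIS_ADDRESS_WITHIN_RANGE( 0x20000100UL, ulFirst, ulLast ) );
    printf( "overflow check: %d\n", portADD_UINT32_WILL_OVERFLOW( 0xFFFFFFF0UL, 0x20UL ) ); /* 1: the sum would wrap. */
    return 0;
}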
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
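vSystemCallEnter() above and vSystemCallEnter_1() here do the same core job: size the hardware-stacked exception frame (26 words when bit 4 of EXC_RETURN indicates an extended FPU frame, 8 words otherwise), copy that frame from the task stack to the task's dedicated system call stack, switch PSP/PSPLIM to that stack, and raise privilege for the duration of the call. The sketch below models only the frame sizing and the copy with ordinary arrays; it is illustrative and is not the port code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXC_RETURN_FTYPE_BIT    ( 1UL << 4UL ) /* Mirrors portEXC_RETURN_STACK_FRAME_TYPE_MASK. */

/* Frame size selection as done in vSystemCallEnter(): bit 4 clear means an
 * extended (FPU) frame of 26 words, otherwise the basic 8 word frame. */
static uint32_t ulFrameSizeWords( uint32_t ulLR )
{
    return ( ( ulLR & EXC_RETURN_FTYPE_BIT ) == 0UL ) ? 26UL : 8UL;
}

int main( void )
{
    /* Basic frame layout R0, R1, R2, R3, R12, LR, PC, xPSR - indices 5/6/7
     * correspond to portOFFSET_TO_LR / portOFFSET_TO_PC / portOFFSET_TO_PSR. */
    uint32_t ulTaskStack[ 8 ] = { 0, 1, 2, 3, 12, 0xAAAA, 0xBBBB, 0x01000000 };
    uint32_t ulSysCallStack[ 32 ];
    uint32_t ulLR = 0xFFFFFFFDUL; /* Example EXC_RETURN: basic frame, no FPU state stacked. */
    uint32_t ulWords = ulFrameSizeWords( ulLR );

    /* Copy the stacked frame onto the system call stack, as the for() loop in
     * vSystemCallEnter() does, so execution resumes on that stack after the SVC. */
    memcpy( ulSysCallStack, ulTaskStack, ulWords * sizeof( uint32_t ) );

    printf( "copied %u words, stacked PC = 0x%04x\n",
            ( unsigned ) ulWords, ( unsigned ) ulSysCallStack[ 6 ] );
    return 0;
}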
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
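The assignment pair that follows rounds the top of ulSystemCallStackBuffer down, and its limit up, to a portBYTE_ALIGNMENT boundary so that the PSP/PSPLIM values later installed by vSystemCallEnter() are double word aligned. Below is a host-side sketch of the same round-down / round-up arithmetic; it assumes the usual 8-byte portBYTE_ALIGNMENT of the Cortex-M ports and is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Assumed values: portBYTE_ALIGNMENT is 8 on the Cortex-M ports. */
#define BYTE_ALIGNMENT        8U
#define BYTE_ALIGNMENT_MASK   ( BYTE_ALIGNMENT - 1U )

int main( void )
{
    uint32_t ulBuffer[ 16 ];
    uintptr_t uxTop   = ( uintptr_t ) &ulBuffer[ 15 ]; /* Last word of the buffer. */
    uintptr_t uxLimit = ( uintptr_t ) &ulBuffer[ 0 ];  /* First word of the buffer. */

    /* Round the initial stack pointer down and the stack limit up, exactly as
     * pxPortInitialiseStack() does for pulSystemCallStack / pulSystemCallStackLimit. */
    uintptr_t uxAlignedTop   = uxTop & ~( uintptr_t ) BYTE_ALIGNMENT_MASK;
    uintptr_t uxAlignedLimit = ( uxLimit + BYTE_ALIGNMENT_MASK ) & ~( uintptr_t ) BYTE_ALIGNMENT_MASK;

    printf( "top    %p -> %p\n", ( void * ) uxTop, ( void * ) uxAlignedTop );
    printf( "limit  %p -> %p\n", ( void * ) uxLimit, ( void * ) uxAlignedLimit );
    return 0;
}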
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
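   The configASSERT() added above catches configMAX_SYSCALL_INTERRUPT_PRIORITY values
   that set priority bits the hardware does not implement. A short worked example,
   assuming a device with three implemented priority bits, so that a priority register
   written with 0xFF reads back as ucMaxPriorityValue = 0xE0:

       // Only the top three bits of the priority byte exist on such a device.
       //   0xA0 & ( uint8_t ) ~0xE0  ==  0xA0 & 0x1F  ==  0x00  ->  assert passes
       //   0x05 & ( uint8_t ) ~0xE0  ==  0x05 & 0x1F  ==  0x05  ->  assert fails, because the
       //   whole value sits in unimplemented bits and the hardware would treat it as
       //   priority 0, defeating the interrupt masking done with basepri.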
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. 
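   The sequence here relies on a fixed layout: ulMAIR0 is stored one word after the
   start of the TCB (hence the adds r0, #4 just above) and is immediately followed by
   the RBAR/RLAR pairs of xRegionsSettings, so they can be streamed out with ldmia.
   Because MPU_RBAR at 0xE000ED9C is backed by three alias register pairs, a single
   stmia of r4-r11 programs four regions for each value written to RNR. A rough C view
   of the data the assembly walks over (a sketch only; the authoritative definition is
   xMPU_SETTINGS in portmacrocommon.h):

       typedef struct
       {
           uint32_t ulRBAR;   // region base address register value
           uint32_t ulRLAR;   // region limit address register value
       } RegionPair_t;        // mirrors MPURegionSettings_t

       typedef struct
       {
           uint32_t     ulMAIR0;        // written to MPU MAIR0 at 0xE000EDC0
           RegionPair_t xRegions[ 8 ];  // 8 or 16 entries depending on configTOTAL_MPU_REGIONS,
                                        // programmed 4 at a time via the RNR/RBAR alias registers
       } MpuBlock_t;                    // first fields of xMPU_SETTINGS inside the TCB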
*/ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. 
*/ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. 
*/ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. 
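   The dispatch here recovers the SVC number from the caller's own SVC instruction: the
   stacked PC is read from byte offset 24 of the exception frame, and the immediate is
   the low byte of the 16 bit opcode sitting two bytes before that return address.
   Roughly the same logic in C, with prototypes assumed to match how the assembly
   passes r0 (stacked frame) and r1 (LR); treat it as a sketch, not the handler itself:

       #include <stdint.h>

       extern void vSystemCallEnter( uint32_t * pulFrame, uint32_t ulLR );
       extern void vSystemCallEnter_1( uint32_t * pulFrame, uint32_t ulLR );
       extern void vSystemCallExit( uint32_t * pulFrame, uint32_t ulLR );
       extern void vPortSVCHandler_C( uint32_t * pulFrame );

       static void prvDispatchSvc( uint32_t * pulStackedFrame, uint32_t ulLR )
       {
           uint32_t ulStackedPC = pulStackedFrame[ 6 ];                   // PC is the 7th stacked word
           uint8_t ucSvcNumber = *( ( ( uint8_t * ) ulStackedPC ) - 2 );  // low byte of the SVC opcode

           switch( ucSvcNumber )
           {
               case 4:  vSystemCallEnter( pulStackedFrame, ulLR );   break;  // portSVC_SYSTEM_CALL_ENTER
               case 5:  vSystemCallEnter_1( pulStackedFrame, ulLR ); break;  // portSVC_SYSTEM_CALL_ENTER_1
               case 6:  vSystemCallExit( pulStackedFrame, ulLR );    break;  // portSVC_SYSTEM_CALL_EXIT
               default: vPortSVCHandler_C( pulStackedFrame );        break;
           }
       }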
*/ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
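   With the v2 wrappers the task's register context is captured into this structure
   (the ulContext array declared below) instead of being left on the task stack, so
   the array is sized for the largest frame the port can produce. The MAX_CONTEXT_SIZE
   values are simply the column totals of the diagrams that follow, for example:

       16 (s16-s31) + 16 (s0-s15, FPSCR) + 8 (r4-r11) + 8 (r0-r3, r12, LR, PC, xPSR)
         + 5 (xSecureContext, PSP, PSPLIM, CONTROL, EXC_RETURN) + 1 = 54 words

       8 (r4-r11) + 8 (r0-r3, r12, LR, PC, xPSR) + 4 (PSP, PSPLIM, CONTROL, EXC_RETURN)
         + 1 = 21 words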
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
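   The split into two entry numbers follows from the AAPCS calling convention: the
   first four arguments travel in r0-r3 and survive the SVC unchanged, but a fifth
   argument is passed on the calling task's stack, so five parameter system calls use
   portSVC_SYSTEM_CALL_ENTER_1, presumably so the entry path can also copy that
   stacked argument across to the system call stack. This is why the five parameter
   notification, event group wait and timer command wrappers in the assembly file
   raise SVC 5 while the rest raise SVC 4. One such five parameter prototype, as
   declared in task.h:

       BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                      UBaseType_t uxIndexToNotify,
                                      uint32_t ulValue,
                                      eNotifyAction eAction,
                                      uint32_t * pulPreviousNotificationValue );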
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
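   Every wrapper that follows repeats one pattern: preserve r0, test the CONTROL nPRIV
   bit, and either branch straight to the privileged implementation or bracket the call
   between the enter and exit SVCs. Expressed as a C-like sketch with illustrative
   names (the authoritative sequence is the assembly below):

       // MPU_xSomeApi( args )                          -- one stub per wrapped kernel API
       //     if( ( CONTROL & 1UL ) == 0UL )            -- nPRIV clear: caller already privileged
       //         return MPU_xSomeApiImpl( args );      -- tail call the implementation directly
       //     else                                      -- caller is an unprivileged task
       //         SVC portSVC_SYSTEM_CALL_ENTER         -- raise privilege, switch to the system call stack
       //         xResult = MPU_xSomeApiImpl( args );   -- run the implementation while privileged
       //         SVC portSVC_SYSTEM_CALL_EXIT          -- restore the task stack and drop privilege
       //         return xResult;
       //
       // The push/pop of r0 around the CONTROL read keeps the wrapped API's first
       // argument intact, and wrappers for five parameter APIs use
       // portSVC_SYSTEM_CALL_ENTER_1 instead of portSVC_SYSTEM_CALL_ENTER.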
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM35P/non_secure/port.c +++ b/portable/IAR/ARM_CM35P/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
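+ * For example, with a = 0xFFFFFFF0 and b = 0x1F the test reads
+ * ( 0xFFFFFFF0 > ( 0xFFFFFFFF - 0x1F ) ) i.e. ( 0xFFFFFFF0 > 0xFFFFFFE0 ),
+ * which is true because the real sum 0x10000000F does not fit in 32 bits;
+ * xPortIsAuthorizedToAccessBuffer() below uses this to reject buffers whose
+ * address range would wrap around.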
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
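+ * The 8-word frame stacked by hardware on exception entry holds r0-r3, r12,
+ * lr, pc and xPSR; the extended 26-word frame used when the FPU is active
+ * additionally holds s0-s15, FPSCR and a reserved word. Copying it verbatim
+ * means the exception return that follows unstacks the very same values from
+ * the system call stack, so the wrapper resumes at the instruction after the
+ * SVC with its original arguments in r0-r3, now privileged and running on the
+ * system call stack.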
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
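+ * (pulTaskStack is written further down once the frame has been copied and is
+ * cleared back to NULL in vSystemCallExit, so it doubles as the "system call
+ * in progress" marker that this assert relies on.)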
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
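+ * Setting nPRIV (bit 0 of CONTROL) undoes the privilege raise performed in
+ * vSystemCallEnter/vSystemCallEnter_1, so the task continues unprivileged on
+ * its own stack once this SVC exit path returns.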
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
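+ * pulSystemCallStack is pointed at the last word of ulSystemCallStackBuffer
+ * and rounded down with ~portBYTE_ALIGNMENT_MASK, while the limit below is
+ * the first word rounded up, so the PSP and PSPLIM values installed by
+ * vSystemCallEnter always sit on portBYTE_ALIGNMENT (double word) boundaries.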
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
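      * (PRIGROUP occupies bits [10:8] of AIRCR, hence the shift below by
      * portPRIGROUP_SHIFT, which is defined earlier in this file as 8.)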
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM35P/non_secure/portasm.s b/portable/IAR/ARM_CM35P/non_secure/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portasm.s +++ b/portable/IAR/ARM_CM35P/non_secure/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. 
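+ * Bit 0 of MPU_CTRL (0xe000ed94) is the ENABLE bit, so the MPU stays disabled
+ * while the per-task MAIR0 and RBAR/RLAR regions are reprogrammed below and is
+ * only switched back on once they are consistent.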
*/ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. 
*/ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. 
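+ * At this point the context record in the TCB, growing upwards, holds (when
+ * the FPU is in use) s16-s31 and the hardware-saved FP words, then the
+ * callee-saved r4-r11 and a copy of the 8-word basic frame (r0-r3, r12, lr,
+ * pc, xPSR); save_special_regs then appends xSecureContext, PSP, PSPLIM,
+ * CONTROL and EXC_RETURN (LR).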
*/ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. 
*/ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. 
*/ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
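+        /* As in every MPU_*_Unpriv path: the svc #portSVC_SYSTEM_CALL_ENTER
+         * above switched execution onto the task's system call stack before
+         * the bl ran the kernel implementation, and the
+         * svc #portSVC_SYSTEM_CALL_EXIT below switches back to the task
+         * stack before control returns to the unprivileged caller. */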
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
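+/* Each of these weak stubs branches to itself, so a wrapper whose real
+ * MPU_*Impl implementation is compiled out spins here if it is ever reached,
+ * rather than returning an undefined value. */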
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
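+ * A sum ( a + b ) exceeds the range of uint32_t exactly when a > ( portUINT32_MAX - b ), so the
+ * test can be made without performing (and wrapping) the addition itself. For example, a buffer
+ * at address 0xFFFFFFF0 with length 0x20 results in portADD_UINT32_WILL_OVERFLOW( 0xFFFFFFF0, 0x1F ),
+ * which is true because 0xFFFFFFF0 > ( 0xFFFFFFFF - 0x1F ); such a wrapping buffer is therefore
+ * rejected by xPortIsAuthorizedToAccessBuffer().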
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
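+ * The result is derived from the portTASK_IS_PRIVILEGED_FLAG bit in the calling task's
+ * xMPU_SETTINGS.ulTaskFlags, and portmacrocommon.h maps portIS_TASK_PRIVILEGED() onto this function.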
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
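+ * The frame (8 words, or 26 words when an extended FPU frame was stacked) is copied from the
+ * task stack to the system call stack so that, when the SVC returns, the hardware unstacks it
+ * from the system call stack and execution continues there.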
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
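+ * It is set on system call entry and cleared again in vSystemCallExit(), so the assertion
+ * below catches an attempt to enter a system call while another one is already in progress.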
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
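+ * Clearing the nPRIV bit (bit 0) of CONTROL switches thread mode to privileged execution for
+ * the remainder of the system call; vSystemCallExit() sets the bit again on the way out.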
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
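+ * Setting the nPRIV bit (bit 0) of CONTROL returns thread mode to unprivileged execution
+ * before the exception return branches back to the task.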
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
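+ * The top of ulSystemCallStackBuffer[] is rounded down, and the limit rounded up, to a
+ * portBYTE_ALIGNMENT boundary so that PSP stays double word aligned whenever the system call
+ * stack is in use and no stack frame padding is ever required on it.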
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. 
*/ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. 
*/ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. 
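+ * Any other SVC number falls through to vPortSVCHandler_C below; for the three system call
+ * SVCs the EXC_RETURN value is passed to the C handlers in r1 so they can tell whether the
+ * hardware stacked a standard or an extended (FPU) frame.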
*/ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. 
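+ * With MPU wrappers v2 the register context is kept across context switches in the TCB-owned
+ * ulContext[] array instead of on the task stack; the layouts below give the MAX_CONTEXT_SIZE
+ * required for each combination of FPU/MVE and TrustZone support.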
*/ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c index e0deaf12840..52f5ac28709 100755 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -279,6 +279,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -315,28 +319,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..a0541f790ba --- /dev/null +++ b/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S @@ -0,0 +1,1556 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Including FreeRTOSConfig.h here will cause build errors if the header file + * contains code not understood by the assembler - for example the 'extern' keyword. + * To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so + * the code is included in C files but excluded by the preprocessor in assembly + * files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ +#include "FreeRTOSConfig.h" + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 5 +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop 
{r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
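+/*-----------------------------------------------------------*/
+
+/* Note on the structure of these wrappers: each one saves r0, reads the
+ * CONTROL register and tests the nPRIV bit (bit 0).  A privileged caller
+ * restores r0 and branches straight to the corresponding MPU_<API>Impl
+ * function.  An unprivileged caller restores r0 and raises the
+ * portSVC_SYSTEM_CALL_ENTER SVC so that the kernel can switch to the
+ * per-task system call stack and raise privilege, calls MPU_<API>Impl,
+ * then raises portSVC_SYSTEM_CALL_EXIT to switch back to the task stack
+ * and drop privilege before returning to the caller with bx lr. */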
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark 
+MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT 
+ bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
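+/*-----------------------------------------------------------*/
+
+/* Wrappers for APIs that take a fifth parameter (for example
+ * MPU_xTaskGenericNotify, MPU_xTaskGenericNotifyWait and
+ * MPU_xEventGroupWaitBits) raise portSVC_SYSTEM_CALL_ENTER_1 rather than
+ * portSVC_SYSTEM_CALL_ENTER, because the fifth parameter is passed on the
+ * task stack and must also be copied across to the system call stack. */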
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
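+/*-----------------------------------------------------------*/
+
+/* MPU_xTimerGenericCommand, further down in this file, is the only wrapper
+ * that also reads the IPSR register before testing CONTROL, so that a call
+ * made from interrupt context always takes the privileged path. */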
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + 
MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + 
pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 1b7cca65c86..27f6f0d6c9a 100755 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -132,8 +132,14 @@ #define portINITIAL_CONTROL_IF_UNPRIVILEGED ( 0x03 ) #define portINITIAL_CONTROL_IF_PRIVILEGED ( 0x02 ) +/* Constants used during system call enter and exit. */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) + /* Offsets in the stack to the parameters when inside the SVC handler. */ +#define portOFFSET_TO_LR ( 5 ) #define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) /* The systick is a 24-bit counter. */ #define portMAX_24_BIT_NUMBER ( 0xffffffUL ) @@ -147,6 +153,21 @@ * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) +/*-----------------------------------------------------------*/ + /* * Configure a number of standard MPU regions that are used by all tasks. */ @@ -184,7 +205,7 @@ extern void vPortEnableVFP( void ); /* * The C portion of the SVC handler. */ -void vPortSVCHandler_C( uint32_t * pulParam ); +void vPortSVCHandler_C( uint32_t * pulParam ) PRIVILEGED_FUNCTION; /* * Called from the SVC handler used to start the scheduler. @@ -208,6 +229,57 @@ extern void vPortRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION; #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; #endif + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. 
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + /*-----------------------------------------------------------*/ /* Each task maintains its own interrupt status in the critical nesting @@ -233,46 +305,56 @@ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - - /* Offset added to account for the way the MCU uses the stack on entry/exit - * of interrupts, and to ensure alignment. */ - pxTopOfStack--; - - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0; /* LR */ - - /* Save code space by skipping register initialisation. */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - - /* A save method is being used that requires each task to maintain its - * own exec return value. */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; - - pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ - if( xRunPrivileged == pdTRUE ) { - *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED; + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED; } else { - *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED; + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED; } + xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */ + xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */ + xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */ + xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */ + xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */ + xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */ + xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */ + xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */ + xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ + + xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */ + xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */ + xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */ + xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */ + xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */ + xMPUSettings->ulContext[ 16 ] = 0; /* LR. 
*/ + xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */ + xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */ + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); - return pxTopOfStack; + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + return &( xMPUSettings->ulContext[ 19 ] ); } /*-----------------------------------------------------------*/ -void vPortSVCHandler_C( uint32_t * pulParam ) +void vPortSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */ { uint8_t ucSVCNumber; uint32_t ulPC; @@ -334,7 +416,7 @@ void vPortSVCHandler_C( uint32_t * pulParam ) ::: "r1", "memory" ); break; - #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ + #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ default: /* Unknown SVC call. */ break; @@ -342,6 +424,308 @@ void vPortSVCHandler_C( uint32_t * pulParam ) } /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Use the pulSystemCallStack in thread mode. 
*/ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. 
*/ + ulStackFrameSize = 8; + } + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " orr r1, #1 \n" /* Set nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Restore the stacked link register to what it was at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} +/*-----------------------------------------------------------*/ + /* * See header file for description. */ @@ -393,6 +777,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -429,28 +817,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
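 *
 * A worked example of the configASSERT() added above, assuming a device with
 * four implemented priority bits: ucMaxPriorityValue reads back as 0xF0, so
 * ( ~ucMaxPriorityValue ) is 0x0F and any configMAX_SYSCALL_INTERRUPT_PRIORITY
 * with a non zero low nibble now fails the assert at scheduler start. The
 * usual FreeRTOSConfig.h pattern that keeps the unimplemented bits clear
 * (configPRIO_BITS of 4 is an assumption for this example):
 *
 *     #define configPRIO_BITS                        4
 *     #define configMAX_SYSCALL_INTERRUPT_PRIORITY   ( 5 << ( 8 - configPRIO_BITS ) ) // 0x50
 *
 * Writing 5 (0x05) directly would previously have been truncated to 0 by the
 * hardware, leaving no interrupts masked at all; the new assert catches that.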
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -756,11 +1122,19 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + /* Invalidate user configurable regions. */ for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } } else @@ -783,6 +1157,13 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) + + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + } lIndex = 0; @@ -803,12 +1184,28 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | ( xRegions[ lIndex ].ulParameters ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t) xRegions[ lIndex ].pvBaseAddress; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL ); + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; + if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) || + ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION; + } + if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } } else { /* Invalidate the region. 
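 *
 * For illustration, a task created with xTaskCreateRestricted() and the
 * hypothetical region table below would now also get that region recorded in
 * xRegionSettings as a start/end address pair with tskMPU_READ_PERMISSION |
 * tskMPU_WRITE_PERMISSION, while the two unused entries fall through to the
 * invalidation path that follows:
 *
 *     // Assumed to be suitably aligned and sized for an MPU region.
 *     static uint8_t ucSharedMemory[ 512 ];
 *
 *     static const MemoryRegion_t xAppRegions[ portNUM_CONFIGURABLE_REGIONS ] =
 *     {
 *         // pvBaseAddress,  ulLengthInBytes,  ulParameters
 *         {  ucSharedMemory, 512,              portMPU_REGION_READ_WRITE },
 *         {  NULL,           0,                0                         },
 *         {  NULL,           0,                0                         }
 *     };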
*/ xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } lIndex++; @@ -817,6 +1214,48 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } /*-----------------------------------------------------------*/ +BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + +{ + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + + return xAccessGranted; +} +/*-----------------------------------------------------------*/ + + #if ( configASSERT_DEFINED == 1 ) void vPortValidateInterruptPriority( void ) diff --git a/portable/IAR/ARM_CM4F_MPU/portasm.s b/portable/IAR/ARM_CM4F_MPU/portasm.s index db751f6e5ef..a0cf8baa2f4 100644 --- a/portable/IAR/ARM_CM4F_MPU/portasm.s +++ b/portable/IAR/ARM_CM4F_MPU/portasm.s @@ -25,6 +25,7 @@ * https://github.com/FreeRTOS * */ + /* Including FreeRTOSConfig.h here will cause build errors if the header file contains code not understood by the assembler - for example the 'extern' keyword. To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so @@ -38,6 +39,9 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit PUBLIC xPortPendSVHandler PUBLIC vPortSVCHandler @@ -49,99 +53,141 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. /*-----------------------------------------------------------*/ +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 3 +#define portSVC_SYSTEM_CALL_ENTER_1 4 +#define portSVC_SYSTEM_CALL_EXIT 5 +/*-----------------------------------------------------------*/ + xPortPendSVHandler: - mrs r0, psp - isb - /* Get the location of the current TCB. */ + ldr r3, =pxCurrentTCB - ldr r2, [r3] + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location where the context should be saved. 
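 *
 * The xPortIsAuthorizedToAccessBuffer() routine added above is what the v2
 * wrappers are expected to call before dereferencing a task supplied pointer.
 * A sketch of that use (illustrative only - the real call sites live in
 * mpu_wrappers_v2.c, which is not part of this hunk):
 *
 *     if( xPortIsAuthorizedToAccessBuffer( pvItemToQueue,
 *                                          uxItemSize,
 *                                          tskMPU_READ_PERMISSION ) == pdTRUE )
 *     {
 *         // Only now touch the buffer on the unprivileged task's behalf.
 *     }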
*/ - /* Is the task using the FPU context? If so, push high vfp registers. */ - tst r14, #0x10 - it eq - vstmdbeq r0!, {s16-s31} + /*------------ Save Context. ----------- */ + mrs r3, control + mrs r0, psp + isb - /* Save the core registers. */ - mrs r1, control - stmdb r0!, {r1, r4-r11, r14} + add r0, r0, #0x20 /* Move r0 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r0, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r0, r0, #0x20 /* Set r0 back to the location of hardware saved context. */ - /* Save the new top of stack into the first member of the TCB. */ - str r0, [r2] + stmia r1!, {r3-r11, lr} /* Store CONTROL register, r4-r11 and LR. */ + ldmia r0, {r4-r11} /* Copy hardware saved context into r4-r11. */ + stmia r1!, {r0, r4-r11} /* Store original PSP (after hardware has saved context) and the hardware saved context. */ + str r1, [r2] /* Save the location from where the context should be restored as the first member of TCB. */ - stmdb sp!, {r0, r3} + /*---------- Select next task. --------- */ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif +#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ +#endif msr basepri, r0 dsb isb - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif +#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ +#endif bl vTaskSwitchContext mov r0, #0 msr basepri, r0 - ldmia sp!, {r0, r3} - - /* The first item in pxCurrentTCB is the task top of stack. */ - ldr r1, [r3] - ldr r0, [r1] - /* Move onto the second item in the TCB... */ - add r1, r1, #4 - - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - /* Region Base Address register. */ - ldr r2, =0xe000ed9c - /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ - stmia r2, {r4-r11} - - #ifdef configTOTAL_MPU_REGIONS - #if ( configTOTAL_MPU_REGIONS == 16 ) - /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - stmia r2, {r4-r11} - /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - stmia r2, {r4-r11} - #endif /* configTOTAL_MPU_REGIONS == 16. */ - #endif /* configTOTAL_MPU_REGIONS */ - - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - - /* Pop the registers that are not automatically saved on exception entry. */ - ldmia r0!, {r3-r11, r14} - msr control, r3 - /* Is the task using the FPU context? If so, pop the high vfp registers - too. */ - tst r14, #0x10 - it eq - vldmiaeq r0!, {s16-s31} + /*------------ Program MPU. ------------ */ + ldr r3, =pxCurrentTCB + ldr r2, [r3] /* r2 = pxCurrentTCB. 
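 *
 * The save sequence above is what MAX_CONTEXT_SIZE (52, defined later in this
 * patch in portmacro.h) has to hold in the worst case, i.e. with the FPU in
 * use:
 *
 *     16 words   s16-s31
 *     17 words   hardware saved s0-s15 and FPSCR, copied from the task stack
 *     10 words   CONTROL, r4-r11 and LR (EXC_RETURN)
 *      9 words   PSP plus the hardware saved r0-r3, r12, LR, PC and xPSR
 *     --------
 *     52 words in total; without the FPU only the last two groups (19 words)
 *     are stored.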
*/ + add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + str r3, [r0] /* Disable MPU. */ + + ldr r0, =0xe000ed9c /* Region Base Address register. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */ + +#ifdef configTOTAL_MPU_REGIONS + #if ( configTOTAL_MPU_REGIONS == 16 ) + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ +#endif + + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + str r3, [r0] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + /*---------- Restore Context. ---------- */ + ldr r3, =pxCurrentTCB + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location of saved context in TCB. */ + ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ msr psp, r0 - isb + stmia r0!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */ + msr control, r3 - bx r14 + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r0!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +vPortSVCHandler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #portSVC_SYSTEM_CALL_ENTER + beq syscall_enter + cmp r2, #portSVC_SYSTEM_CALL_ENTER_1 + beq syscall_enter_1 + cmp r2, #portSVC_SYSTEM_CALL_EXIT + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + vPortSVCHandler: - #ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */ + #ifndef USE_PROCESS_STACK tst lr, #4 ite eq mrseq r0, msp @@ -151,6 +197,7 @@ vPortSVCHandler: #endif b vPortSVCHandler_C +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortStartFirstTask: @@ -176,60 +223,56 @@ vPortStartFirstTask: /*-----------------------------------------------------------*/ vPortRestoreContextOfFirstTask: - /* Use the NVIC offset register to locate the stack. */ - ldr r0, =0xE000ED08 + ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */ ldr r0, [r0] ldr r0, [r0] - /* Set the msp back to the start of the stack. */ - msr msp, r0 - /* Restore the context. 
*/ + msr msp, r0 /* Set the msp back to the start of the stack. */ + + /*------------ Program MPU. ------------ */ ldr r3, =pxCurrentTCB - ldr r1, [r3] - /* The first item in the TCB is the task top of stack. */ - ldr r0, [r1] - /* Move onto the second item in the TCB... */ - add r1, r1, #4 - - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - /* Region Base Address register. */ - ldr r2, =0xe000ed9c - /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ - stmia r2, {r4-r11} - - #ifdef configTOTAL_MPU_REGIONS - #if ( configTOTAL_MPU_REGIONS == 16 ) - /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - stmia r2, {r4-r11} - /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - stmia r2, {r4-r11} - #endif /* configTOTAL_MPU_REGIONS == 16. */ - #endif /* configTOTAL_MPU_REGIONS */ - - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - - /* Pop the registers that are not automatically saved on exception entry. */ - ldmia r0!, {r3-r11, r14} - msr control, r3 - /* Restore the task stack pointer. */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + str r3, [r0] /* Disable MPU. */ + + ldr r0, =0xe000ed9c /* Region Base Address register. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */ + +#ifdef configTOTAL_MPU_REGIONS + #if ( configTOTAL_MPU_REGIONS == 16 ) + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ +#endif + + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + str r3, [r0] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + /*---------- Restore Context. ---------- */ + ldr r3, =pxCurrentTCB + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location of saved context in TCB. */ + + ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ msr psp, r0 + stmia r0, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */ + msr control, r3 + str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. 
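 *
 * For reference, the SVC dispatch added to vPortSVCHandler above can be read
 * as the following C sketch (illustrative only; pulCallerStack is MSP or PSP
 * depending on EXC_RETURN bit 2, ulLR is the EXC_RETURN value):
 *
 *     uint32_t ulPC = pulCallerStack[ 6 ];                  // Stacked return address.
 *     uint8_t ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];   // Immediate encoded in the SVC instruction.
 *
 *     switch( ucSVCNumber )
 *     {
 *         case portSVC_SYSTEM_CALL_ENTER:   vSystemCallEnter( pulCallerStack, ulLR );   break;
 *         case portSVC_SYSTEM_CALL_ENTER_1: vSystemCallEnter_1( pulCallerStack, ulLR ); break;
 *         case portSVC_SYSTEM_CALL_EXIT:    vSystemCallExit( pulCallerStack, ulLR );    break;
 *         default:                          vPortSVCHandler_C( pulCallerStack );        break;
 *     }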
*/ + mov r0, #0 msr basepri, r0 - bx r14 + bx lr /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM4F_MPU/portmacro.h b/portable/IAR/ARM_CM4F_MPU/portmacro.h index 96787e7c31f..4bb8abcdecb 100644 --- a/portable/IAR/ARM_CM4F_MPU/portmacro.h +++ b/portable/IAR/ARM_CM4F_MPU/portmacro.h @@ -195,9 +195,45 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; +typedef struct MPU_REGION_SETTINGS +{ + uint32_t ulRegionStartAddress; + uint32_t ulRegionEndAddress; + uint32_t ulRegionPermissions; +} xMPU_REGION_SETTINGS; + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + +#define MAX_CONTEXT_SIZE 52 + +/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ +#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) +#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + typedef struct MPU_SETTINGS { xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; + xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ]; + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif } xMPU_SETTINGS; /* Architecture specifics. */ @@ -207,9 +243,12 @@ typedef struct MPU_SETTINGS /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ -#define portSVC_START_SCHEDULER 0 -#define portSVC_YIELD 1 -#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_START_SCHEDULER 0 +#define portSVC_YIELD 1 +#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 5 /* Scheduler utilities. */ @@ -348,6 +387,16 @@ extern void vResetPrivilege( void ); #define portRESET_PRIVILEGE() vResetPrivilege() /*-----------------------------------------------------------*/ +extern BaseType_t xPortIsTaskPrivileged( void ); + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() +/*-----------------------------------------------------------*/ + #ifndef configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY #warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. https://www.FreeRTOS.org/FreeRTOS-V10.3.x.html" #define configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY 0 diff --git a/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + 
MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc 
#portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop 
{r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
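
/* Every wrapper in this file follows the same shape as the task and queue
 * wrappers above: test CONTROL.nPRIV, fall straight through to the Impl
 * function when already privileged, otherwise bracket the Impl call between
 * the system call enter and exit SVCs. A C rendering of the pattern (sketch
 * only - the real wrappers stay in assembly so the register state reaching
 * the SVC handler is exactly the caller's argument registers):
 *
 *     if( ( __get_CONTROL() & 1UL ) == 0UL )
 *     {
 *         // Already privileged - call the implementation directly.
 *         return MPU_xQueueGenericSendImpl( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );
 *     }
 *     else
 *     {
 *         // svc #portSVC_SYSTEM_CALL_ENTER   - vSystemCallEnter() raises privilege and
 *         //                                    switches PSP to the system call stack.
 *         // bl  MPU_xQueueGenericSendImpl    - the kernel side implementation runs privileged.
 *         // svc #portSVC_SYSTEM_CALL_EXIT    - vSystemCallExit() drops privilege and
 *         //                                    restores the task stack before returning.
 *     }
 *
 * Calls that pass a fifth parameter on the stack (for example
 * MPU_xTaskGenericNotify) use portSVC_SYSTEM_CALL_ENTER_1 instead so that
 * vSystemCallEnter_1() copies that parameter across as well. */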
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
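 *
 * In other words (C sketch only, CMSIS style intrinsics assumed): take the
 * direct branch to the Impl function when either IPSR is non zero - handler
 * mode, where raising another SVC could escalate to a fault - or the caller
 * is already privileged:
 *
 *     if( ( __get_IPSR() != 0UL ) || ( ( __get_CONTROL() & 1UL ) == 0UL ) )
 *     {
 *         // MPU_xTimerGenericCommand_Priv: branch straight to MPU_xTimerGenericCommandImpl.
 *     }
 *     else
 *     {
 *         // MPU_xTimerGenericCommand_Unpriv: svc enter, Impl, svc exit as usual.
 *     }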
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + 
MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + 
pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
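 *
 * For example (hypothetical configuration, for illustration only): with
 *
 *     #define configUSE_TIMERS    0
 *
 * in FreeRTOSConfig.h the timer related Impl functions are not compiled, so
 * the PUBWEAK definitions below keep the wrappers above linkable. If one is
 * ever reached at run time it simply branches to itself, which shows up
 * immediately under a debugger instead of failing in an undefined way.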
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM55/non_secure/port.c +++ b/portable/IAR/ARM_CM55/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
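For example (operand values chosen purely for illustration), portADD_UINT32_WILL_OVERFLOW( 0xFFFFFFF0UL, 0x20UL ) is true: portUINT32_MAX - 0x20UL is 0xFFFFFFDF, which is smaller than 0xFFFFFFF0, so the sum would wrap past 32 bits. xPortIsAuthorizedToAccessBuffer() later in this file relies on this check to reject a buffer whose end address ( pvBuffer + ulBufferLength - 1 ) cannot be represented.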
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
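The copy below moves the hardware-stacked exception frame: 8 words for a basic frame, 26 for an extended frame (which additionally holds s0-s15, FPSCR and a reserved word). A minimal sketch of the basic layout that the portOFFSET_TO_* constants index - the type name is purely illustrative and not part of the port:

    typedef struct xILLUSTRATIVE_EXCEPTION_FRAME
    {
        uint32_t ulR0;   /* [ 0 ] */
        uint32_t ulR1;   /* [ 1 ] */
        uint32_t ulR2;   /* [ 2 ] */
        uint32_t ulR3;   /* [ 3 ] */
        uint32_t ulR12;  /* [ 4 ] */
        uint32_t ulLR;   /* [ 5 ] portOFFSET_TO_LR */
        uint32_t ulPC;   /* [ 6 ] portOFFSET_TO_PC */
        uint32_t ulXPSR; /* [ 7 ] portOFFSET_TO_PSR */
    } xIllustrativeExceptionFrame_t;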
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the effect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed on the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore them when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need for padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM55/non_secure/portasm.s b/portable/IAR/ARM_CM55/non_secure/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/IAR/ARM_CM55/non_secure/portasm.s +++ b/portable/IAR/ARM_CM55/non_secure/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. 
*/ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. 
*/ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. 
*/ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
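(Shifting EXC_RETURN left by 25 bits moves bit[6] into bit[31], the sign bit, so the bmi/bpl that follows can test the secure-stack bit without a separate mask and compare.)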
*/ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. 
*/ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. 
*/ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
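+    /* Note on the pattern shared by every wrapper in this file: the
+     * svc #portSVC_SYSTEM_CALL_ENTER (or _ENTER_1) raised above moves
+     * execution onto the task's dedicated system call stack before the
+     * MPU_...Impl function is reached, and the svc #portSVC_SYSTEM_CALL_EXIT
+     * below switches back to the task stack before returning to the caller.
+     * See vSystemCallEnter, vSystemCallEnter_1 and vSystemCallExit in the
+     * non_secure port.c for the corresponding handlers. */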
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
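+/* Each MPU_...Impl stub in this block is declared PUBWEAK and simply branches
+ * to itself. When the corresponding kernel API is compiled in, the strong
+ * definition provided by the C mpu_wrappers overrides the weak stub at link
+ * time; when the API is excluded by the configuration, a stray call spins in
+ * the self-branch instead of jumping to an undefined symbol. */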
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
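+         * The hardware saved frame (8 words, or 26 words when an extended FPU
+         * frame was stacked) is duplicated onto the system call stack so that
+         * the exception return finds a valid frame there.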
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
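+         * It is set further down in this function and cleared again in
+         * vSystemCallExit, so the assert below also catches an unexpected
+         * nested system call.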
*/
+        configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+        #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        {
+            if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+            {
+                /* Extended frame i.e. FPU in use. */
+                ulStackFrameSize = 26;
+                __asm volatile (
+                    " vpush {s0}         \n" /* Trigger lazy stacking. */
+                    " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
+                    ::: "memory"
+                );
+            }
+            else
+            {
+                /* Standard frame i.e. FPU not in use. */
+                ulStackFrameSize = 8;
+            }
+        }
+        #else
+        {
+            ulStackFrameSize = 8;
+        }
+        #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        /* Make space on the system call stack for the stack frame and
+         * the parameter passed on the stack. We only need to copy one
+         * parameter but we still reserve 2 spaces to keep the stack
+         * double word aligned. */
+        pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+        /* Copy the stack frame. */
+        for( i = 0; i < ulStackFrameSize; i++ )
+        {
+            pulSystemCallStack[ i ] = pulTaskStack[ i ];
+        }
+
+        /* Copy the parameter which is passed on the stack. */
+        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+        {
+            pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+        }
+        else
+        {
+            pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+        }
+
+        /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+         * We need to restore it when we exit from the system call. */
+        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+        __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+        /* Use the pulSystemCallStack in thread mode. */
+        __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+        __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+        /* Remember the location where we should copy the stack frame when we exit from
+         * the system call. */
+        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+        /* We ensure in pxPortInitialiseStack that the system call stack is
+         * double word aligned and therefore, there is no need for padding.
+         * Clear the bit[9] of stacked xPSR. */
+        pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+        /* Raise the privilege for the duration of the system call. */
+        __asm volatile (
+            " mrs r0, control     \n" /* Obtain current control value. */
+            " movs r1, #1         \n" /* r1 = 1. */
+            " bics r0, r1         \n" /* Clear nPRIV bit. */
+            " msr control, r0     \n" /* Write back new control value.
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
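+         * The stack top below is rounded down, and the stack limit rounded up,
+         * to a portBYTE_ALIGNMENT boundary so that both pointers stay within
+         * ulSystemCallStackBuffer.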
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. 
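+                                               ( xMPU_SETTINGS, whose first field is ulMAIR0, is the TCB member that immediately follows the saved-context pointer, hence the offset of 4. )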
*/ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. 
*/ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. 
*/ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. 
*/ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
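+     * MAX_CONTEXT_SIZE below sizes ulContext for the largest context this port
+     * can save (with or without FPU/MVE and TrustZone state), and ulTaskFlags
+     * records the task's privilege level and whether the stacked frame needed
+     * alignment padding.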
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
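+                                               ( The fifth parameter travels on the task stack, so vSystemCallEnter_1 also copies it across to the system call stack. )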
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c index 63f83993db9..c1a1cc5aa5a 100755 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -267,6 +267,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -303,28 +307,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b 
MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
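+/* Note: every MPU_* wrapper in this file follows the same dispatch pattern: bit 0
+ * (nPRIV) of the CONTROL register is tested to determine whether the caller is
+ * privileged. A privileged caller branches directly to the kernel implementation,
+ * while an unprivileged caller enters the kernel through SVC portSVC_SYSTEM_CALL_ENTER
+ * (or portSVC_SYSTEM_CALL_ENTER_1 for system calls that pass a fifth parameter on the
+ * stack), executes the implementation on the privileged system call stack set up by
+ * vSystemCallEnter in port.c, and returns through SVC portSVC_SYSTEM_CALL_EXIT. */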
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
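+/* The timer, event group and stream buffer wrappers below use the same
+ * privileged/unprivileged dispatch as the task and queue wrappers above.
+ * MPU_xTimerGenericCommand additionally reads the IPSR so that a call made
+ * from interrupt context always takes the privileged path. */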
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + 
MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + 
pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM85/non_secure/port.c +++ b/portable/IAR/ARM_CM85/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM85/non_secure/portasm.s b/portable/IAR/ARM_CM85/non_secure/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/IAR/ARM_CM85/non_secure/portasm.s +++ b/portable/IAR/ARM_CM85/non_secure/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. 
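+                                        * Bit 0 of MPU_CTRL is the ENABLE bit, so the MPU stays off while the per-task regions are reprogrammed.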
*/
+    str r2, [r1]                        /* Disable MPU. */
+
+    adds r0, #4                         /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+    ldr r1, [r0]                        /* r1 = *r0 i.e. r1 = MAIR0. */
+    ldr r2, =0xe000edc0                 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+    str r1, [r2]                        /* Program MAIR0. */
+
+    adds r0, #4                         /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+    ldr r1, =0xe000ed98                 /* r1 = 0xe000ed98 [Location of RNR]. */
+    ldr r2, =0xe000ed9c                 /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+    movs r3, #4                         /* r3 = 4. */
+    str r3, [r1]                        /* Program RNR = 4. */
+    ldmia r0!, {r4-r11}                 /* Read 4 sets of RBAR/RLAR registers from TCB. */
+    stmia r2, {r4-r11}                  /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+    #if ( configTOTAL_MPU_REGIONS == 16 )
+        movs r3, #8                     /* r3 = 8. */
+        str r3, [r1]                    /* Program RNR = 8. */
+        ldmia r0!, {r4-r11}             /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11}              /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+        movs r3, #12                    /* r3 = 12. */
+        str r3, [r1]                    /* Program RNR = 12. */
+        ldmia r0!, {r4-r11}             /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11}              /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+    #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+    ldr r1, =0xe000ed94                 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+    ldr r2, [r1]                        /* Read the value of MPU_CTRL. */
+    orr r2, #1                          /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+    str r2, [r1]                        /* Enable MPU. */
+    dsb                                 /* Force memory writes before continuing. */
+
+    restore_context_first_task:
+    ldr r3, =pxCurrentTCB               /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+    ldr r1, [r3]                        /* r1 = pxCurrentTCB. */
+    ldr r2, [r1]                        /* r2 = Location of saved context in TCB. */
+
+    restore_special_regs_first_task:
+    ldmdb r2!, {r0, r3-r5, lr}          /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+    msr psp, r3
+    msr psplim, r4
+    msr control, r5
+    ldr r4, =xSecureContext             /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+    str r0, [r4]                        /* Restore xSecureContext. */
+
+    restore_general_regs_first_task:
+    ldmdb r2!, {r4-r11}                 /* r4-r11 contain hardware saved context. */
+    stmia r3!, {r4-r11}                 /* Copy the hardware saved context on the task stack. */
+    ldmdb r2!, {r4-r11}                 /* r4-r11 restored. */
+
+    restore_context_done_first_task:
+    str r2, [r1]                        /* Save the location where the context should be saved next as the first member of TCB. */
+    mov r0, #0
+    msr basepri, r0                     /* Ensure that interrupts are enabled when the first task starts. */
+    bx lr
+
+#else /* configENABLE_MPU */
+
 vRestoreContextOfFirstTask:
     ldr r2, =pxCurrentTCB               /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
     ldr r3, [r2]                        /* Read pxCurrentTCB. */
     ldr r0, [r3]                        /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
-    dmb                                 /* Complete outstanding transfers before disabling MPU. */
-    ldr r2, =0xe000ed94                 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
-    ldr r4, [r2]                        /* Read the value of MPU_CTRL. */
-    bic r4, r4, #1                      /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
-    str r4, [r2]                        /* Disable MPU. */
-
-    adds r3, #4                         /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
-    ldr r4, [r3]                        /* r4 = *r3 i.e. r4 = MAIR0. */
-    ldr r2, =0xe000edc0                 /* r2 = 0xe000edc0 [Location of MAIR0]. */
-    str r4, [r2]                        /* Program MAIR0. */
-    ldr r2, =0xe000ed98                 /* r2 = 0xe000ed98 [Location of RNR]. */
-    movs r4, #4                         /* r4 = 4. */
-    str r4, [r2]                        /* Program RNR = 4. */
-    adds r3, #4                         /* r3 = r3 + 4. r3 now points to first RBAR in TCB.
*/ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. 
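+                                                  * The frame that the hardware pushed onto the task stack is copied into the TCB so that the complete context is stored in one place rather than on the task stack.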
*/
+
+    save_special_regs:
+        mrs r3, psp                               /* r3 = PSP. */
+        mrs r4, psplim                            /* r4 = PSPLIM. */
+        mrs r5, control                           /* r5 = CONTROL. */
+        stmia r2!, {r0, r3-r5, lr}                /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+        str r2, [r1]                              /* Save the location from where the context should be restored as the first member of TCB. */
+
+    select_next_task:
+        mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+        msr basepri, r0                           /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        dsb
+        isb
+        bl vTaskSwitchContext
+        mov r0, #0                                /* r0 = 0. */
+        msr basepri, r0                           /* Enable interrupts. */
+
+    program_mpu:
+        ldr r3, =pxCurrentTCB                     /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+        ldr r0, [r3]                              /* r0 = pxCurrentTCB. */
+
+        dmb                                       /* Complete outstanding transfers before disabling MPU. */
+        ldr r1, =0xe000ed94                       /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+        ldr r2, [r1]                              /* Read the value of MPU_CTRL. */
+        bic r2, #1                                /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+        str r2, [r1]                              /* Disable MPU. */
+
+        adds r0, #4                               /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+        ldr r1, [r0]                              /* r1 = *r0 i.e. r1 = MAIR0. */
+        ldr r2, =0xe000edc0                       /* r2 = 0xe000edc0 [Location of MAIR0]. */
+        str r1, [r2]                              /* Program MAIR0. */
+
+        adds r0, #4                               /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+        ldr r1, =0xe000ed98                       /* r1 = 0xe000ed98 [Location of RNR]. */
+        ldr r2, =0xe000ed9c                       /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+        movs r3, #4                               /* r3 = 4. */
+        str r3, [r1]                              /* Program RNR = 4. */
+        ldmia r0!, {r4-r11}                       /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11}                        /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+        #if ( configTOTAL_MPU_REGIONS == 16 )
+            movs r3, #8                           /* r3 = 8. */
+            str r3, [r1]                          /* Program RNR = 8. */
+            ldmia r0!, {r4-r11}                   /* Read 4 sets of RBAR/RLAR registers from TCB. */
+            stmia r2, {r4-r11}                    /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+            movs r3, #12                          /* r3 = 12. */
+            str r3, [r1]                          /* Program RNR = 12. */
+            ldmia r0!, {r4-r11}                   /* Read 4 sets of RBAR/RLAR registers from TCB. */
+            stmia r2, {r4-r11}                    /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+        #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+        ldr r1, =0xe000ed94                       /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+        ldr r2, [r1]                              /* Read the value of MPU_CTRL. */
+        orr r2, #1                                /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+        str r2, [r1]                              /* Enable MPU. */
+        dsb                                       /* Force memory writes before continuing. */
+
+    restore_context:
+        ldr r3, =pxCurrentTCB                     /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+        ldr r1, [r3]                              /* r1 = pxCurrentTCB. */
+        ldr r2, [r1]                              /* r2 = Location of saved context in TCB. */
+
+    restore_special_regs:
+        ldmdb r2!, {r0, r3-r5, lr}                /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+        msr psp, r3
+        msr psplim, r4
+        msr control, r5
+        ldr r4, =xSecureContext                   /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+        str r0, [r4]                              /* Restore xSecureContext. */
+        cbz r0, restore_ns_context                /* No secure context to restore. */
+
+    restore_s_context:
+        push {r1-r3, lr}
+        bl SecureContext_LoadContext              /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+        pop {r1-r3, lr}
+
+    restore_ns_context:
+        mov r0, lr                                /* r0 = LR (EXC_RETURN). */
+        lsls r0, r0, #25                          /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame.
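+                                                  * The left shift by 25 places bit[6] in the sign bit so that it can be tested with the bmi instruction below.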
*/ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. 
*/ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. 
*/ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
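+     * The complete task context is saved into the ulContext member of this structure; MAX_CONTEXT_SIZE below is the size of that array in words for each combination of FPU/MVE and TrustZone support.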
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
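+                                              A separate SVC number is used because the fifth parameter is passed on the stack rather than in a register and needs extra handling in the SVC handler.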
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c index 7bbe1b7bc53..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} - if( xRunPrivileged == pdTRUE ) +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
         */
+        configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+        #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        {
+            if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+            {
+                /* Extended frame i.e. FPU in use. */
+                ulStackFrameSize = 26;
+                __asm volatile (
+                    " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+                    ::: "memory"
+                );
+            }
+            else
+            {
+                /* Standard frame i.e. FPU not in use. */
+                ulStackFrameSize = 8;
+            }
+        }
+        #else
+        {
+            ulStackFrameSize = 8;
+        }
+        #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        /* Make space on the system call stack for the stack frame and
+         * the parameter passed on the stack. We only need to copy one
+         * parameter but we still reserve 2 spaces to keep the stack
+         * double word aligned. */
+        pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+        /* Copy the stack frame. */
+        for( i = 0; i < ulStackFrameSize; i++ )
+        {
+            pulSystemCallStack[ i ] = pulTaskStack[ i ];
+        }
+
+        /* Copy the parameter which is passed on the stack. */
+        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+        {
+            pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+        }
+        else
+        {
+            pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+        }
+
+        /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+         * We need to restore them when we exit from the system call. */
+        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+        __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+        /* Use the pulSystemCallStack in thread mode. */
+        __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+        __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+        /* Remember the location where we should copy the stack frame when we exit from
+         * the system call. */
+        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+        /* We ensure in pxPortInitialiseStack that the system call stack is
+         * double word aligned and therefore, there is no need for padding.
+         * Clear the bit[9] of stacked xPSR. */
+        pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+        /* Raise the privilege for the duration of the system call. */
+        __asm volatile (
+            " mrs r0, control \n" /* Obtain current control value. */
+            " movs r1, #1 \n" /* r1 = 1. */
+            " bics r0, r1 \n" /* Clear nPRIV bit. */
+            " msr control, r0 \n" /* Write back new control value.
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1128,6 +1646,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1686,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -1372,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. 
*/ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. 
*/ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. 
*/ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. 
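   Dispatch note: the SVC number is read back from the instruction that raised the
   exception (two bytes behind the stacked PC). Numbers 4 and 5 enter a system call
   through vSystemCallEnter / vSystemCallEnter_1, 6 leaves one through
   vSystemCallExit, and any other number falls through to vPortSVCHandler_C. In
   each case LR (EXC_RETURN) is copied into r1 before the branch, presumably so the
   C handler can tell which exception frame format is in use.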
*/ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
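   Two entry numbers are needed because r0-r3 can only carry four arguments: calls
   with a fifth parameter go through portSVC_SYSTEM_CALL_ENTER_1 and
   vSystemCallEnter_1, presumably so the argument that already sits on the task
   stack can be carried across to the system call stack as well.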
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ATMega323/portmacro.h b/portable/IAR/ATMega323/portmacro.h index 69ba2f1d9ac..cbc7b2d156b 100644 --- a/portable/IAR/ATMega323/portmacro.h +++ b/portable/IAR/ATMega323/portmacro.h @@ -71,7 +71,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AVR32_UC3/portmacro.h b/portable/IAR/AVR32_UC3/portmacro.h index 73c206ca5f8..98aa5be0745 100644 --- a/portable/IAR/AVR32_UC3/portmacro.h +++ b/portable/IAR/AVR32_UC3/portmacro.h @@ -119,7 +119,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AVR_AVRDx/portmacro.h b/portable/IAR/AVR_AVRDx/portmacro.h index 5b4b5be8b3c..cb3cb7ec201 100644 --- a/portable/IAR/AVR_AVRDx/portmacro.h +++ b/portable/IAR/AVR_AVRDx/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AVR_Mega0/portmacro.h b/portable/IAR/AVR_Mega0/portmacro.h index 5b4b5be8b3c..cb3cb7ec201 100644 --- a/portable/IAR/AVR_Mega0/portmacro.h +++ b/portable/IAR/AVR_Mega0/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AtmelSAM7S64/port.c b/portable/IAR/AtmelSAM7S64/port.c index 1e14d2e4aaa..32489702348 100644 --- a/portable/IAR/AtmelSAM7S64/port.c +++ b/portable/IAR/AtmelSAM7S64/port.c @@ -218,7 +218,7 @@ AT91PS_PITC pxPIT = AT91C_BASE_PITC; /* Configure the PIT period. */ pxPIT->PITC_PIMR = portPIT_ENABLE | portPIT_INT_ENABLE | portPIT_COUNTER_VALUE; - /* Enable the interrupt. Global interrupts are disables at this point so + /* Enable the interrupt. 
Global interrupts are disabled at this point so this is safe. */ AT91F_AIC_EnableIt( AT91C_BASE_AIC, AT91C_ID_SYS ); } diff --git a/portable/IAR/AtmelSAM7S64/portmacro.h b/portable/IAR/AtmelSAM7S64/portmacro.h index 01c6eed6aad..9bcf8a67003 100644 --- a/portable/IAR/AtmelSAM7S64/portmacro.h +++ b/portable/IAR/AtmelSAM7S64/portmacro.h @@ -64,7 +64,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AtmelSAM9XE/portmacro.h b/portable/IAR/AtmelSAM9XE/portmacro.h index db6f10cca85..fdc7c2ba684 100644 --- a/portable/IAR/AtmelSAM9XE/portmacro.h +++ b/portable/IAR/AtmelSAM9XE/portmacro.h @@ -67,7 +67,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/LPC2000/portmacro.h b/portable/IAR/LPC2000/portmacro.h index 8fdf1fe75a3..2929ecba54a 100644 --- a/portable/IAR/LPC2000/portmacro.h +++ b/portable/IAR/LPC2000/portmacro.h @@ -67,7 +67,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/MSP430/portmacro.h b/portable/IAR/MSP430/portmacro.h index 298307f5519..6dcec2f0314 100644 --- a/portable/IAR/MSP430/portmacro.h +++ b/portable/IAR/MSP430/portmacro.h @@ -58,7 +58,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/MSP430X/portmacro.h b/portable/IAR/MSP430X/portmacro.h index 1ab4665b9bf..72456cf11c0 100644 --- a/portable/IAR/MSP430X/portmacro.h +++ b/portable/IAR/MSP430X/portmacro.h @@ -67,7 +67,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
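/* Note on the repeated portMAX_DELAY edits in these IAR portmacro.h files: only
 * the UL suffix is added, the value is unchanged. The suffix makes the constant
 * explicitly unsigned long rather than relying on the implicit type of an
 * unsuffixed hex constant, which matters mostly for static analysis / MISRA-style
 * rules on the 16-bit targets. */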
#endif diff --git a/portable/IAR/RL78/portmacro.h b/portable/IAR/RL78/portmacro.h index 9685b569d5d..1cfe85fa650 100644 --- a/portable/IAR/RL78/portmacro.h +++ b/portable/IAR/RL78/portmacro.h @@ -82,7 +82,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c index 8ef593f5523..91c924a05c7 100755 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -329,6 +329,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -365,28 +369,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c index ae7ce37f37b..91038fd69b5 100755 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -294,6 +294,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -330,28 +334,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
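   (The hunks in the MikroC and RVDS port.c files swap the compile-time
   __NVIC_PRIO_BITS / configPRIO_BITS assertions for a single runtime check:
   configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U )
   traps configurations that set priority bits the hardware does not implement,
   which would otherwise be silently ignored when the value is written to BASEPRI.)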
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c index cb003aa38f9..3c2040596b0 100755 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -360,6 +360,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -396,28 +400,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c b/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..aa1e825fc1d --- /dev/null +++ b/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c @@ -0,0 +1,1993 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
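   Every MPU_ wrapper in this new file follows the same trampoline pattern: read
   CONTROL, test the nPRIV bit (bit 0); if the caller is already privileged,
   tail-branch straight to the corresponding MPU_...Impl function, otherwise raise
   portSVC_SYSTEM_CALL_ENTER (or portSVC_SYSTEM_CALL_ENTER_1 for the
   five-parameter calls), call the Impl, and raise portSVC_SYSTEM_CALL_EXIT before
   returning. The push/pop of r0 around the CONTROL read simply preserves the
   first argument; the actual privilege raising and system-call stack switching is
   done by the SVC handlers in the port layer, not by the wrappers themselves.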
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskDelayUntilImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv +MPU_xTaskDelayUntil_Priv + pop {r0} + b MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntil_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskAbortDelayImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv +MPU_xTaskAbortDelay_Priv + pop {r0} + b MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelay_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskDelayImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv +MPU_vTaskDelay_Priv + pop {r0} + b MPU_vTaskDelayImpl +MPU_vTaskDelay_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskPriorityGetImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv +MPU_uxTaskPriorityGet_Priv + pop {r0} + b MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGet_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_eTaskGetStateImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv +MPU_eTaskGetState_Priv + pop {r0} + b MPU_eTaskGetStateImpl +MPU_eTaskGetState_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
+} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskGetInfoImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv +MPU_vTaskGetInfo_Priv + pop {r0} + b MPU_vTaskGetInfoImpl +MPU_vTaskGetInfo_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL; + +__asm TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetIdleTaskHandleImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv +MPU_xTaskGetIdleTaskHandle_Priv + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandle_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskSuspendImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv +MPU_vTaskSuspend_Priv + pop {r0} + b MPU_vTaskSuspendImpl +MPU_vTaskSuspend_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskResumeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv +MPU_vTaskResume_Priv + pop {r0} + b MPU_vTaskResumeImpl +MPU_vTaskResume_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL; + +__asm TickType_t MPU_xTaskGetTickCount( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetTickCountImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv +MPU_xTaskGetTickCount_Priv + pop {r0} + b MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCount_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTaskGetNumberOfTasks( 
void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskGetNumberOfTasksImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv +MPU_uxTaskGetNumberOfTasks_Priv + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasks_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL; + +__asm char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pcTaskGetNameImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv +MPU_pcTaskGetName_Priv + pop {r0} + b MPU_pcTaskGetNameImpl +MPU_pcTaskGetName_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGetRunTimeCounterImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv +MPU_ulTaskGetRunTimeCounter_Priv + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounter_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGetRunTimePercentImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv +MPU_ulTaskGetRunTimePercent_Priv + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercent_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) FREERTOS_SYSTEM_CALL; + +__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGetIdleRunTimePercentImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv +MPU_ulTaskGetIdleRunTimePercent_Priv + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercent_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 
) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL; + +__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGetIdleRunTimeCounterImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv +MPU_ulTaskGetIdleRunTimeCounter_Priv + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounter_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskSetApplicationTaskTagImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv +MPU_vTaskSetApplicationTaskTag_Priv + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTag_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetApplicationTaskTagImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv +MPU_xTaskGetApplicationTaskTag_Priv + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTag_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskSetThreadLocalStoragePointerImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv +MPU_vTaskSetThreadLocalStoragePointer_Priv + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointer_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) FREERTOS_SYSTEM_CALL; + +__asm void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + 
BaseType_t xIndex ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pvTaskGetThreadLocalStoragePointerImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv +MPU_pvTaskGetThreadLocalStoragePointer_Priv + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskGetSystemStateImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv +MPU_uxTaskGetSystemState_Priv + pop {r0} + b MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemState_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskGetStackHighWaterMarkImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv +MPU_uxTaskGetStackHighWaterMark_Priv + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMark_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskGetStackHighWaterMark2Impl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv +MPU_uxTaskGetStackHighWaterMark2_Priv + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; + +__asm TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetCurrentTaskHandleImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv 
+MPU_xTaskGetCurrentTaskHandle_Priv + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandle_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskGetSchedulerState( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetSchedulerStateImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv +MPU_xTaskGetSchedulerState_Priv + pop {r0} + b MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerState_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskSetTimeOutStateImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv +MPU_vTaskSetTimeOutState_Priv + pop {r0} + b MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutState_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskCheckForTimeOutImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv +MPU_xTaskCheckForTimeOut_Priv + pop {r0} + b MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOut_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGenericNotifyImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv +MPU_xTaskGenericNotify_Priv + pop {r0} + b MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotify_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + 
uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGenericNotifyWaitImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv +MPU_xTaskGenericNotifyWait_Priv + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWait_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGenericNotifyTakeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv +MPU_ulTaskGenericNotifyTake_Priv + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTake_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGenericNotifyStateClearImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv +MPU_xTaskGenericNotifyStateClear_Priv + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClear_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) FREERTOS_SYSTEM_CALL; + +__asm uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGenericNotifyValueClearImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv +MPU_ulTaskGenericNotifyValueClear_Priv + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClear_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t 
xCopyPosition ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueGenericSendImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv +MPU_xQueueGenericSend_Priv + pop {r0} + b MPU_xQueueGenericSendImpl +MPU_xQueueGenericSend_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxQueueMessagesWaitingImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv +MPU_uxQueueMessagesWaiting_Priv + pop {r0} + b MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaiting_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxQueueSpacesAvailableImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv +MPU_uxQueueSpacesAvailable_Priv + pop {r0} + b MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailable_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueReceiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv +MPU_xQueueReceive_Priv + pop {r0} + b MPU_xQueueReceiveImpl +MPU_xQueueReceive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueuePeekImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv +MPU_xQueuePeek_Priv + pop {r0} + b MPU_xQueuePeekImpl +MPU_xQueuePeek_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueSemaphoreTakeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_xQueueSemaphoreTake_Unpriv +MPU_xQueueSemaphoreTake_Priv + pop {r0} + b MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTake_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL; + +__asm TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueGetMutexHolderImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv +MPU_xQueueGetMutexHolder_Priv + pop {r0} + b MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolder_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueTakeMutexRecursiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv +MPU_xQueueTakeMutexRecursive_Priv + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueGiveMutexRecursiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv +MPU_xQueueGiveMutexRecursive_Priv + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueSelectFromSetImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv +MPU_xQueueSelectFromSet_Priv + pop {r0} + b MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSet_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t 
xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueAddToSetImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv +MPU_xQueueAddToSet_Priv + pop {r0} + b MPU_xQueueAddToSetImpl +MPU_xQueueAddToSet_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vQueueAddToRegistryImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv +MPU_vQueueAddToRegistry_Priv + pop {r0} + b MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistry_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vQueueUnregisterQueueImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv +MPU_vQueueUnregisterQueue_Priv + pop {r0} + b MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueue_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +__asm const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pcQueueGetNameImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv +MPU_pcQueueGetName_Priv + pop {r0} + b MPU_pcQueueGetNameImpl +MPU_pcQueueGetName_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pvTimerGetTimerIDImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv +MPU_pvTimerGetTimerID_Priv + pop {r0} + b MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerID_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) FREERTOS_SYSTEM_CALL; + +__asm 
void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTimerSetTimerIDImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv +MPU_vTimerSetTimerID_Priv + pop {r0} + b MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerID_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerIsTimerActiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv +MPU_xTimerIsTimerActive_Priv + pop {r0} + b MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL; + +__asm TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGetTimerDaemonTaskHandleImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv +MPU_xTimerGetTimerDaemonTaskHandle_Priv + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGenericCommandImpl + + push {r0} + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv +MPU_xTimerGenericCommand_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +MPU_xTimerGenericCommand_Priv + pop {r0} + b MPU_xTimerGenericCommandImpl +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pcTimerGetNameImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv +MPU_pcTimerGetName_Priv + pop {r0} + b MPU_pcTimerGetNameImpl +MPU_pcTimerGetName_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + 
+#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTimerSetReloadModeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv +MPU_vTimerSetReloadMode_Priv + pop {r0} + b MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadMode_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGetReloadModeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv +MPU_xTimerGetReloadMode_Priv + pop {r0} + b MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadMode_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTimerGetReloadModeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv +MPU_uxTimerGetReloadMode_Priv + pop {r0} + b MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadMode_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGetPeriodImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv +MPU_xTimerGetPeriod_Priv + pop {r0} + b MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriod_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGetExpiryTimeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv +MPU_xTimerGetExpiryTime_Priv + pop {r0} + b MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTime_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + 
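The MPU_* wrappers above and below all share one template: read CONTROL, test the nPRIV bit, and either branch straight to the privileged MPU_*Impl implementation or bracket the call between the system-call-enter and system-call-exit SVCs. The annotated copy of that template below is purely illustrative and is not part of the patch; MPU_xExampleCall and MPU_xExampleCallImpl are placeholder names, and wrappers that take five parameters (for example MPU_xTaskGenericNotifyWait and MPU_xEventGroupWaitBits) use svc #portSVC_SYSTEM_CALL_ENTER_1 instead of svc #portSVC_SYSTEM_CALL_ENTER so that the fifth, stack-passed parameter is also copied across by vSystemCallEnter_1.

__asm BaseType_t MPU_xExampleCall( uint32_t ulArg0 ) /* FREERTOS_SYSTEM_CALL */
{
    PRESERVE8
    extern MPU_xExampleCallImpl

    push {r0}                        /* Preserve the first argument while r0 is reused. */
    mrs r0, control                  /* Read the CONTROL register. */
    tst r0, #1                       /* Test the nPRIV bit. */
    bne MPU_xExampleCall_Unpriv      /* nPRIV set - the caller is unprivileged. */
MPU_xExampleCall_Priv
    pop {r0}                         /* Restore the first argument. */
    b MPU_xExampleCallImpl           /* Already privileged - tail-call the implementation. */
MPU_xExampleCall_Unpriv
    pop {r0}                         /* Restore the first argument. */
    svc #portSVC_SYSTEM_CALL_ENTER   /* vSystemCallEnter(): switch to the task's system call stack and raise privilege. */
    bl MPU_xExampleCallImpl          /* Run the implementation privileged, on the system call stack. */
    svc #portSVC_SYSTEM_CALL_EXIT    /* vSystemCallExit(): restore the task stack and drop privilege. */
    bx lr
}

Because the privileged path is a plain branch to the implementation, tasks that already run privileged pay no SVC overhead when calling these APIs; only unprivileged callers take the enter/exit SVC pair.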
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xEventGroupWaitBitsImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv +MPU_xEventGroupWaitBits_Priv + pop {r0} + b MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBits_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL; + +__asm EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xEventGroupClearBitsImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv +MPU_xEventGroupClearBits_Priv + pop {r0} + b MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBits_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) FREERTOS_SYSTEM_CALL; + +__asm EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xEventGroupSetBitsImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv +MPU_xEventGroupSetBits_Priv + pop {r0} + b MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBits_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xEventGroupSyncImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv +MPU_xEventGroupSync_Priv + pop {r0} + b MPU_xEventGroupSyncImpl +MPU_xEventGroupSync_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxEventGroupGetNumberImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv +MPU_uxEventGroupGetNumber_Priv + pop {r0} + b MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumber_Unpriv + pop {r0} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vEventGroupSetNumberImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv +MPU_vEventGroupSetNumber_Priv + pop {r0} + b MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumber_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferSendImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv +MPU_xStreamBufferSend_Priv + pop {r0} + b MPU_xStreamBufferSendImpl +MPU_xStreamBufferSend_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferReceiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv +MPU_xStreamBufferReceive_Priv + pop {r0} + b MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferIsFullImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv +MPU_xStreamBufferIsFull_Priv + pop {r0} + b MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFull_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferIsEmptyImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv +MPU_xStreamBufferIsEmpty_Priv + pop {r0} + b 
MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmpty_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferSpacesAvailableImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv +MPU_xStreamBufferSpacesAvailable_Priv + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl +MPU_xStreamBufferSpacesAvailable_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferBytesAvailableImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv +MPU_xStreamBufferBytesAvailable_Priv + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailable_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferSetTriggerLevelImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv +MPU_xStreamBufferSetTriggerLevel_Priv + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevel_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferNextMessageLengthBytesImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv +MPU_xStreamBufferNextMessageLengthBytes_Priv + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytes_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index 13e2f8a8ed1..7bcf6bc8813 100755 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -108,13 +108,34 @@ #define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) #define portPRIGROUP_SHIFT ( 8UL ) +/* Constants used during system call enter and exit. 
*/ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) + /* Offsets in the stack to the parameters when inside the SVC handler. */ +#define portOFFSET_TO_LR ( 5 ) #define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) +/*-----------------------------------------------------------*/ + /* Each task maintains its own interrupt status in the critical nesting * variable. Note this is not saved as part of the task context as context * switches can only occur when uxCriticalNesting is zero. */ @@ -158,7 +179,7 @@ static void prvRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION; * C portion of the SVC handler. The SVC handler is split between an asm entry * and a C wrapper for simplicity of coding and maintenance. */ -void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( used ) ) PRIVILEGED_FUNCTION; +void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( used ) ) PRIVILEGED_FUNCTION; /* * Function to enable the VFP. @@ -215,6 +236,61 @@ void vResetPrivilege( void ); #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; #endif + +/** + * @brief Triggers lazy stacking of FPU registers. + */ +static void prvTriggerLazyStacking( void ) PRIVILEGED_FUNCTION; + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. 
+ */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ /* @@ -223,43 +299,59 @@ void vResetPrivilege( void ); StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ - pxTopOfStack--; - *pxTopOfStack = 0; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - - /* A save method is being used that requires each task to maintain its - * own exec return value. */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; - - pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ - if( xRunPrivileged == pdTRUE ) { - *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED; + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED; } else { - *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED; + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED; + } + xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */ + xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */ + xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */ + xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */ + xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */ + xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */ + xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */ + xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */ + xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ + + xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */ + xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */ + xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */ + xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */ + xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */ + xMPUSettings->ulContext[ 16 ] = 0; /* LR. */ + xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */ + xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */ + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. 
*/ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; } + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ - return pxTopOfStack; + return &( xMPUSettings->ulContext[ 19 ] ); } /*-----------------------------------------------------------*/ -void prvSVCHandler( uint32_t * pulParam ) +void vSVCHandler_C( uint32_t * pulParam ) { uint8_t ucSVCNumber; - uint32_t ulReg, ulPC; + uint32_t ulPC, ulReg; #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) extern uint32_t __syscalls_flash_start__; @@ -300,11 +392,11 @@ void prvSVCHandler( uint32_t * pulParam ) { __asm { -/* *INDENT-OFF* */ + /* *INDENT-OFF* */ mrs ulReg, control /* Obtain current control value. */ bic ulReg, # 1 /* Set privilege bit. */ msr control, ulReg /* Write back new control value. */ -/* *INDENT-ON* */ + /* *INDENT-ON* */ } } @@ -313,14 +405,14 @@ void prvSVCHandler( uint32_t * pulParam ) case portSVC_RAISE_PRIVILEGE: __asm { -/* *INDENT-OFF* */ + /* *INDENT-OFF* */ mrs ulReg, control /* Obtain current control value. */ bic ulReg, # 1 /* Set privilege bit. */ msr control, ulReg /* Write back new control value. */ -/* *INDENT-ON* */ + /* *INDENT-ON* */ } break; - #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ + #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ default: /* Unknown SVC call. */ break; @@ -328,9 +420,339 @@ void prvSVCHandler( uint32_t * pulParam ) } /*-----------------------------------------------------------*/ +__asm void prvTriggerLazyStacking( void ) /* PRIVILEGED_FUNCTION */ +{ +/* *INDENT-OFF* */ + PRESERVE8 + + vpush {s0} /* Trigger lazy stacking. */ + vpop {s0} /* Nullify the affect of the above instruction. */ + +/* *INDENT-ON* */ +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1; + extern uint32_t __syscalls_flash_start__; + extern uint32_t __syscalls_flash_end__; + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + prvTriggerLazyStacking(); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm + { + msr psp, pulSystemCallStack + }; + + /* Raise the privilege for the duration of the system call. */ + __asm + { + mrs r1, control /* Obtain current control value. */ + bic r1, #1 /* Clear nPRIV bit. */ + msr control, r1 /* Write back new control value. 
*/ + }; + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1; + extern uint32_t __syscalls_flash_start__; + extern uint32_t __syscalls_flash_end__; + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + prvTriggerLazyStacking(); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Use the pulSystemCallStack in thread mode. 
*/ + __asm + { + msr psp, pulSystemCallStack + }; + + /* Raise the privilege for the duration of the system call. */ + __asm + { + mrs r1, control /* Obtain current control value. */ + bic r1, #1 /* Clear nPRIV bit. */ + msr control, r1 /* Write back new control value. */ + }; + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1; + extern uint32_t __syscalls_flash_start__; + extern uint32_t __syscalls_flash_end__; + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + prvTriggerLazyStacking(); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm + { + msr psp, pulTaskStack + }; + + /* Drop the privilege before returning to the thread mode. */ + __asm + { + mrs r1, control /* Obtain current control value. */ + orr r1, #1 /* Set nPRIV bit. */ + msr control, r1 /* Write back new control value. */ + }; + + /* Restore the stacked link register to what it was at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. 
*/ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +__asm void vPortSVCHandler( void ) +{ + extern vSVCHandler_C + extern vSystemCallEnter + extern vSystemCallEnter_1 + extern vSystemCallExit + +/* *INDENT-OFF* */ + PRESERVE8 + + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #portSVC_SYSTEM_CALL_ENTER + beq syscall_enter + cmp r2, #portSVC_SYSTEM_CALL_ENTER_1 + beq syscall_enter_1 + cmp r2, #portSVC_SYSTEM_CALL_EXIT + beq syscall_exit + b vSVCHandler_C + +syscall_enter + mov r1, lr + b vSystemCallEnter + +syscall_enter_1 + mov r1, lr + b vSystemCallEnter_1 + +syscall_exit + mov r1, lr + b vSystemCallExit +/* *INDENT-ON* */ +} + +#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + __asm void vPortSVCHandler( void ) { - extern prvSVCHandler + extern vSVCHandler_C /* *INDENT-OFF* */ PRESERVE8 @@ -345,9 +767,11 @@ __asm void vPortSVCHandler( void ) mrs r0, psp #endif - b prvSVCHandler + b vSVCHandler_C /* *INDENT-ON* */ } + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ __asm void prvRestoreContextOfFirstTask( void ) @@ -355,45 +779,54 @@ __asm void prvRestoreContextOfFirstTask( void ) /* *INDENT-OFF* */ PRESERVE8 - ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */ - ldr r0, [ r0 ] - ldr r0, [ r0 ] - msr msp, r0 /* Set the msp back to the start of the stack. */ - ldr r3, =pxCurrentTCB /* Restore the context. */ - ldr r1, [ r3 ] - ldr r0, [ r1 ] /* The first item in the TCB is the task top of stack. */ - add r1, r1, #4 /* Move onto the second item in the TCB... */ - - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ - bic r3, r3, # 1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - str r3, [ r2 ] /* Disable MPU. */ - - ldr r2, =0xe000ed9c /* Region Base Address register. */ - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] + ldr r0, [r0] + msr msp, r0 /* Set the msp back to the start of the stack. */ + + /*------------ Program MPU. ------------ */ + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + bic r3, r3, # 1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + str r3, [r0] /* Disable MPU. */ + + ldr r0, =0xe000ed9c /* Region Base Address register. 
*/ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */ #if ( configTOTAL_MPU_REGIONS == 16 ) - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ #endif /* configTOTAL_MPU_REGIONS == 16. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ - orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - str r3, [ r2 ] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + str r3, [r0] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + /*---------- Restore Context. ---------- */ + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location of saved context in TCB. */ - ldmia r0 !, { r3 - r11, r14 } /* Pop the registers that are not automatically saved on exception entry. */ + ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + msr psp, r0 + stmia r0, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */ msr control, r3 - msr psp, r0 /* Restore the task stack pointer. */ + str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 msr basepri, r0 - bx r14 - nop + bx lr /* *INDENT-ON* */ } /*-----------------------------------------------------------*/ @@ -453,6 +886,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -489,28 +926,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; @@ -668,72 +1083,90 @@ __asm void xPortPendSVHandler( void ) /* *INDENT-OFF* */ PRESERVE8 - mrs r0, psp - - ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */ - ldr r2, [ r3 ] - - tst r14, #0x10 /* Is the task using the FPU context? If so, push high vfp registers. */ - it eq - vstmdbeq r0 !, { s16 - s31 } + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location where the context should be saved. */ - mrs r1, control - stmdb r0 !, { r1, r4 - r11, r14 } /* Save the remaining registers. */ - str r0, [ r2 ] /* Save the new top of stack into the first member of the TCB. */ + /*------------ Save Context. ----------- */ + mrs r3, control + mrs r0, psp + isb - stmdb sp !, { r0, r3 } - mov r0, # configMAX_SYSCALL_INTERRUPT_PRIORITY - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif + add r0, r0, #0x20 /* Move r0 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r0, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r0, r0, #0x20 /* Set r0 back to the location of hardware saved context. */ + + stmia r1!, {r3-r11, lr} /* Store CONTROL register, r4-r11 and LR. */ + ldmia r0, {r4-r11} /* Copy hardware saved context into r4-r11. */ + stmia r1!, {r0, r4-r11} /* Store original PSP (after hardware has saved context) and the hardware saved context. */ + str r1, [r2] /* Save the location from where the context should be restored as the first member of TCB. */ + + /*---------- Select next task. --------- */ + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY +#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ +#endif msr basepri, r0 dsb isb - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif +#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ +#endif bl vTaskSwitchContext mov r0, #0 msr basepri, r0 - ldmia sp !, { r0, r3 } - /* Restore the context. */ - ldr r1, [ r3 ] - ldr r0, [ r1 ] /* The first item in the TCB is the task top of stack. */ - add r1, r1, #4 /* Move onto the second item in the TCB... */ - - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ - bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - str r3, [ r2 ] /* Disable MPU. */ - - ldr r2, =0xe000ed9c /* Region Base Address register. */ - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ - - #if ( configTOTAL_MPU_REGIONS == 16 ) - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* configTOTAL_MPU_REGIONS == 16. */ - - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ - orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. 
*/ - str r3, [ r2 ] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - ldmia r0 !, { r3 - r11, r14 } /* Pop the registers that are not automatically saved on exception entry. */ + /*------------ Program MPU. ------------ */ + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + str r3, [r0] /* Disable MPU. */ + + ldr r0, =0xe000ed9c /* Region Base Address register. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */ + +#if ( configTOTAL_MPU_REGIONS == 16 ) + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ +#endif /* configTOTAL_MPU_REGIONS == 16. */ + + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + str r3, [r0] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + /*---------- Restore Context. ---------- */ + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location of saved context in TCB. */ + + ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + msr psp, r0 + stmia r0!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */ msr control, r3 - tst r14, #0x10 /* Is the task using the FPU context? If so, pop the high vfp registers too. */ - it eq - vldmiaeq r0 !, { s16 - s31 } + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r0!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ - msr psp, r0 - bx r14 - nop + str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr /* *INDENT-ON* */ } /*-----------------------------------------------------------*/ @@ -952,11 +1385,19 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + /* Invalidate user configurable regions. 
*/ for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } } else @@ -979,6 +1420,12 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) + + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); } lIndex = 0; @@ -999,12 +1446,28 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | ( xRegions[ lIndex ].ulParameters ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL ); + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; + if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) || + ( ( xRegions[lIndex].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION; + } + if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } } else { /* Invalidate the region. */ xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } lIndex++; @@ -1013,6 +1476,47 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } /*-----------------------------------------------------------*/ +BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + +{ + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. 
*/ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + + return xAccessGranted; +} +/*-----------------------------------------------------------*/ + __asm uint32_t prvPortGetIPSR( void ) { /* *INDENT-OFF* */ diff --git a/portable/RVDS/ARM_CM4_MPU/portmacro.h b/portable/RVDS/ARM_CM4_MPU/portmacro.h index c7cd5628951..cc4e136d67e 100644 --- a/portable/RVDS/ARM_CM4_MPU/portmacro.h +++ b/portable/RVDS/ARM_CM4_MPU/portmacro.h @@ -193,9 +193,45 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; +typedef struct MPU_REGION_SETTINGS +{ + uint32_t ulRegionStartAddress; + uint32_t ulRegionEndAddress; + uint32_t ulRegionPermissions; +} xMPU_REGION_SETTINGS; + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#define MAX_CONTEXT_SIZE 52 + +/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ +#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) +#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + typedef struct MPU_SETTINGS { xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; + xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ]; + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif } xMPU_SETTINGS; /* Architecture specifics. */ @@ -209,9 +245,12 @@ typedef struct MPU_SETTINGS /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ -#define portSVC_START_SCHEDULER 0 -#define portSVC_YIELD 1 -#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_START_SCHEDULER 0 +#define portSVC_YIELD 1 +#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 5 /* Scheduler utilities. 
*/ @@ -314,6 +353,16 @@ extern void vResetPrivilege( void ); #define portRESET_PRIVILEGE() vResetPrivilege() /*-----------------------------------------------------------*/ +extern BaseType_t xPortIsTaskPrivileged( void ); + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() +/*-----------------------------------------------------------*/ + static portFORCE_INLINE void vPortSetBASEPRI( uint32_t ulBASEPRI ) { __asm diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c index 1df54ab2802..8414acc99e6 100755 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -344,6 +344,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -380,28 +384,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/ThirdParty/GCC/ATmega/portmacro.h b/portable/ThirdParty/GCC/ATmega/portmacro.h index 8292f2d7ed9..e48ddb2195e 100644 --- a/portable/ThirdParty/GCC/ATmega/portmacro.h +++ b/portable/ThirdParty/GCC/ATmega/portmacro.h @@ -54,6 +54,8 @@ #define portLONG long #define portSHORT int +#define portPOINTER_SIZE_TYPE uint16_t + typedef uint8_t StackType_t; typedef int8_t BaseType_t; typedef uint8_t UBaseType_t; diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index 5ac570d9a59..23aec65526e 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -332,17 +332,17 @@ void vPortEnableInterrupts( void ) } /*-----------------------------------------------------------*/ -portBASE_TYPE xPortSetInterruptMask( void ) +UBaseType_t xPortSetInterruptMask( void ) { /* Interrupts are always disabled inside ISRs (signals * handlers). 
*/ - return pdTRUE; + return ( UBaseType_t )0; } /*-----------------------------------------------------------*/ -void vPortClearInterruptMask( portBASE_TYPE xMask ) +void vPortClearInterruptMask( UBaseType_t uxMask ) { - ( void ) xMask; + ( void ) uxMask; } /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index a3ab7d32393..154a039758c 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -70,8 +70,6 @@ typedef unsigned long TickType_t; /*-----------------------------------------------------------*/ /* Architecture specifics. */ -#define portNORETURN __attribute__( ( noreturn ) ) - #define portSTACK_GROWTH ( -1 ) #define portHAS_STACK_OVERFLOW_CHECKING ( 1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) @@ -94,8 +92,8 @@ extern void vPortEnableInterrupts( void ); #define portSET_INTERRUPT_MASK() ( vPortDisableInterrupts() ) #define portCLEAR_INTERRUPT_MASK() ( vPortEnableInterrupts() ) -extern portBASE_TYPE xPortSetInterruptMask( void ); -extern void vPortClearInterruptMask( portBASE_TYPE xMask ); +extern UBaseType_t xPortSetInterruptMask( void ); +extern void vPortClearInterruptMask( UBaseType_t xMask ); extern void vPortEnterCritical( void ); extern void vPortExitCritical( void ); @@ -114,7 +112,7 @@ extern void vPortCancelThread( void *pxTaskToDelete ); #define portCLEAN_UP_TCB( pxTCB ) vPortCancelThread( pxTCB ) /*-----------------------------------------------------------*/ -#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) __attribute__( ( noreturn ) ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index cec4f7fedeb..fffd8c66c2a 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -82,7 +82,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /* We have to use PICO_DIVIDER_DISABLE_INTERRUPTS as the source of truth rathern than our config, * as our FreeRTOSConfig.h header cannot be included by ASM code - which is what this affects in the SDK */ #define portUSE_DIVIDER_SAVE_RESTORE !PICO_DIVIDER_DISABLE_INTERRUPTS @@ -182,8 +181,8 @@ #else extern void vTaskEnterCritical( void ); extern void vTaskExitCritical( void ); - extern portBASE_TYPE vTaskEnterCriticalFromISR( void ); - extern void vTaskExitCriticalFromISR( portBASE_TYPE xSavedInterruptStatus ); + extern UBaseType_t vTaskEnterCriticalFromISR( void ); + extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); #define portENTER_CRITICAL() vTaskEnterCritical() #define portEXIT_CRITICAL() vTaskExitCritical() #define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR() diff --git a/portable/ThirdParty/GCC/RP2040/library.cmake b/portable/ThirdParty/GCC/RP2040/library.cmake index 5ff4c0931d1..1db96517af6 100644 --- a/portable/ThirdParty/GCC/RP2040/library.cmake +++ b/portable/ThirdParty/GCC/RP2040/library.cmake @@ -6,6 +6,7 @@ 
add_library(FreeRTOS-Kernel-Core INTERFACE) target_sources(FreeRTOS-Kernel-Core INTERFACE + ${FREERTOS_KERNEL_PATH}/croutine.c ${FREERTOS_KERNEL_PATH}/event_groups.c ${FREERTOS_KERNEL_PATH}/list.c ${FREERTOS_KERNEL_PATH}/queue.c diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h index 575659ac597..e87d560cfcd 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h @@ -335,8 +335,6 @@ /*-----------------------------------------------------------*/ /* Architecture specifics. */ - #define portNORETURN __attribute__( ( noreturn ) ) - #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 4 diff --git a/portable/ThirdParty/xClang/XCOREAI/portmacro.h b/portable/ThirdParty/xClang/XCOREAI/portmacro.h index 019d29a3c7a..36907b258be 100644 --- a/portable/ThirdParty/xClang/XCOREAI/portmacro.h +++ b/portable/ThirdParty/xClang/XCOREAI/portmacro.h @@ -165,8 +165,8 @@ void vTaskExitCritical(void); #define portENTER_CRITICAL() vTaskEnterCritical() #define portEXIT_CRITICAL() vTaskExitCritical() -extern portBASE_TYPE vTaskEnterCriticalFromISR( void ); -extern void vTaskExitCriticalFromISR( portBASE_TYPE xSavedInterruptStatus ); +extern UBaseType_t vTaskEnterCriticalFromISR( void ); +extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); #define portENTER_CRITICAL_FROM_ISR vTaskEnterCriticalFromISR #define portEXIT_CRITICAL_FROM_ISR vTaskExitCriticalFromISR diff --git a/portable/WizC/PIC18/portmacro.h b/portable/WizC/PIC18/portmacro.h index ad70560e4ad..0fdba688eee 100644 --- a/portable/WizC/PIC18/portmacro.h +++ b/portable/WizC/PIC18/portmacro.h @@ -63,7 +63,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) ( 0xFFFF ) #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/queue.c b/queue.c index 37ab1f24dff..e87db0f45e3 100644 --- a/queue.c +++ b/queue.c @@ -38,6 +38,10 @@ #include "task.h" #include "queue.h" +#if ( configUSE_CO_ROUTINES == 1 ) + #include "croutine.h" +#endif + /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined * for the header files above, but not in this file, in order to generate the @@ -1107,7 +1111,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const BaseType_t xCopyPosition ) { BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1135,7 +1139,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). 
*/ - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { @@ -1260,7 +1264,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1270,7 +1274,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; Queue_t * const pxQueue = xQueue; /* Similar to xQueueGenericSendFromISR() but used with semaphores where the @@ -1306,7 +1310,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1426,7 +1430,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1965,7 +1969,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1987,7 +1991,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -2047,7 +2051,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -2057,7 +2061,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) { BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; int8_t * pcOriginalReadPosition; Queue_t * const pxQueue = xQueue; @@ -2081,7 +2085,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { /* Cannot block in an ISR, so check there is data available. 
*/ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2102,7 +2106,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -2226,6 +2230,12 @@ void vQueueDelete( QueueHandle_t xQueue ) #endif /* configUSE_TRACE_FACILITY */ /*-----------------------------------------------------------*/ +UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ +{ + return ( ( Queue_t * ) xQueue )->uxItemSize; +} +/*-----------------------------------------------------------*/ + #if ( configUSE_MUTEXES == 1 ) static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) @@ -2559,6 +2569,293 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ /*-----------------------------------------------------------*/ +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSend( QueueHandle_t xQueue, + const void * pvItemToQueue, + TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already full we may have to block. A critical section + * is required to prevent an interrupt removing something from the queue + * between the check to see if the queue is full and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + /* The queue is full - do we want to block or just leave without + * posting? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is called from a coroutine we cannot block directly, but + * return indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + /* There is room in the queue, copy the data into the queue. */ + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + xReturn = pdPASS; + + /* Were any co-routines waiting for data to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + * into the ready list as we are within a critical section. + * Instead the same pending ready list mechanism is used as if + * the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The co-routine waiting has a higher priority so record + * that a yield might be appropriate. */ + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = errQUEUE_FULL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceive( QueueHandle_t xQueue, + void * pvBuffer, + TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already empty we may have to block. 
A critical section + * is required to prevent an interrupt adding something to the queue + * between the check to see if the queue is empty and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + /* There are no messages in the queue, do we want to block or just + * leave with nothing? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is a co-routine we cannot block directly, but return + * indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data is available from the queue. */ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + xReturn = pdPASS; + + /* Were any co-routines waiting for space to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + * into the ready list as we are within a critical section. + * Instead the same pending ready list mechanism is used as if + * the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = pdFAIL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, + const void * pvItemToQueue, + BaseType_t xCoRoutinePreviouslyWoken ) + { + Queue_t * const pxQueue = xQueue; + + /* Cannot block within an ISR so if there is no space on the queue then + * exit without doing anything. */ + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + + /* We only want to wake one co-routine per ISR, so check that a + * co-routine has not already been woken. */ + if( xCoRoutinePreviouslyWoken == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + return pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCoRoutinePreviouslyWoken; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, + void * pvBuffer, + BaseType_t * pxCoRoutineWoken ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* We cannot block from an ISR, so check there is data available. 
If + * not then just leave without doing anything. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Copy the data from the queue. */ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + if( ( *pxCoRoutineWoken ) == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + *pxCoRoutineWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + #if ( configQUEUE_REGISTRY_SIZE > 0 ) void vQueueAddToRegistry( QueueHandle_t xQueue, diff --git a/stream_buffer.c b/stream_buffer.c index 890202e601a..b7410fc0668 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -78,7 +78,7 @@ */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvRECEIVE_COMPLETED( pxStreamBuffer ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxReceiveCompletedCallback != NULL ) \ { \ ( pxStreamBuffer )->pxReceiveCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \ @@ -87,7 +87,7 @@ { \ sbRECEIVE_COMPLETED( ( pxStreamBuffer ) ); \ } \ - } + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvRECEIVE_COMPLETED( pxStreamBuffer ) sbRECEIVE_COMPLETED( ( pxStreamBuffer ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ @@ -96,9 +96,9 @@ #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ do { \ - portBASE_TYPE xSavedInterruptStatus; \ + UBaseType_t uxSavedInterruptStatus; \ \ - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -109,14 +109,14 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); \ + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ } while( 0 ) #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxReceiveCompletedCallback != NULL ) \ { \ ( pxStreamBuffer )->pxReceiveCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \ @@ -125,7 +125,7 @@ { \ sbRECEIVE_COMPLETED_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \ } \ - } + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ sbRECEIVE_COMPLETED_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ) @@ -173,9 +173,9 @@ #ifndef sbSEND_COMPLETE_FROM_ISR #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ do { \ - portBASE_TYPE xSavedInterruptStatus; \ + UBaseType_t uxSavedInterruptStatus; \ \ - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( 
pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -186,7 +186,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); \ + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ } while( 0 ) #endif /* sbSEND_COMPLETE_FROM_ISR */ @@ -436,11 +436,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); #if ( configASSERT_DEFINED == 1 ) - + { /* Sanity check that the size of the structure used to declare a * variable of type StaticStreamBuffer_t equals the size of the real * message buffer structure. */ - configASSERT( sizeof( StaticStreamBuffer_t ) == sizeof( StreamBuffer_t ) ); + volatile size_t xSize = sizeof( StaticStreamBuffer_t ); + configASSERT( xSize == sizeof( StreamBuffer_t ) ); + } /*lint !e529 xSize is referenced is configASSERT() is defined. */ #endif /* configASSERT_DEFINED */ if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) @@ -478,7 +480,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, StaticStreamBuffer_t ** ppxStaticStreamBuffer ) { BaseType_t xReturn; - const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; + StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; configASSERT( pxStreamBuffer ); configASSERT( ppucStreamBufferStorageArea ); @@ -1214,11 +1216,11 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( pxStreamBuffer ); - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { @@ -1234,7 +1236,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer xReturn = pdFALSE; } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1245,11 +1247,11 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( pxStreamBuffer ); - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { @@ -1265,7 +1267,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf xReturn = pdFALSE; } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } diff --git a/tasks.c b/tasks.c index 0a4a1fe672b..b991b7da4bb 100644 --- a/tasks.c +++ b/tasks.c @@ -126,12 +126,12 @@ /* uxTopReadyPriority holds the priority of the highest priority ready * state task. 
*/ #define taskRECORD_READY_PRIORITY( uxPriority ) \ - { \ + do { \ if( ( uxPriority ) > uxTopReadyPriority ) \ { \ uxTopReadyPriority = ( uxPriority ); \ } \ - } /* taskRECORD_READY_PRIORITY */ + } while( 0 ) /* taskRECORD_READY_PRIORITY */ /*-----------------------------------------------------------*/ @@ -549,9 +549,9 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * void prvMinimalIdleTask( void *pvParameters ); * */ -static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; #if ( configNUMBER_OF_CORES > 1 ) - static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; + static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; #endif /* @@ -1644,17 +1644,17 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { #if ( portSTACK_GROWTH < 0 ) { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged ); + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) ); } #else /* portSTACK_GROWTH */ { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) ); } #endif /* portSTACK_GROWTH */ } #else /* portHAS_STACK_OVERFLOW_CHECKING */ { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) ); } #endif /* portHAS_STACK_OVERFLOW_CHECKING */ } @@ -2038,7 +2038,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * block. */ const TickType_t xConstTickCount = xTickCount; - configASSERT( uxSchedulerSuspended == 1 ); + configASSERT( uxSchedulerSuspended == 1U ); /* Generate the tick time at which the task wants to wake. */ xTimeToWake = *pxPreviousWakeTime + xTimeIncrement; @@ -2124,7 +2124,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { vTaskSuspendAll(); { - configASSERT( uxSchedulerSuspended == 1 ); + configASSERT( uxSchedulerSuspended == 1U ); traceTASK_DELAY(); @@ -2169,6 +2169,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { eTaskState eReturn; List_t const * pxStateList; + List_t const * pxEventList; List_t const * pxDelayedList; List_t const * pxOverflowedDelayedList; const TCB_t * const pxTCB = xTask; @@ -2187,12 +2188,20 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, taskENTER_CRITICAL(); { pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); + pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ); pxDelayedList = pxDelayedTaskList; pxOverflowedDelayedList = pxOverflowDelayedTaskList; } taskEXIT_CRITICAL(); - if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) + if( pxEventList == &xPendingReadyList ) + { + /* The task has been placed on the pending ready list, so its + * state is eReady regardless of what list the task's state list + * item is currently placed on. 
*/ + eReturn = eReady; + } + else if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) { /* The task being queried is referenced from one of the Blocked * lists. */ @@ -2310,7 +2319,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { TCB_t const * pxTCB; UBaseType_t uxReturn; - portBASE_TYPE xSavedInterruptState; + UBaseType_t uxSavedInterruptStatus; /* RTOS ports that support interrupt nesting have the concept of a * maximum system call (or maximum API call) interrupt priority. @@ -2330,14 +2339,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptState = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptState ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return uxReturn; } @@ -2983,7 +2992,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { BaseType_t xYieldRequired = pdFALSE; TCB_t * const pxTCB = xTaskToResume; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( xTaskToResume ); @@ -3005,7 +3014,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -3061,7 +3070,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xYieldRequired; } @@ -3646,7 +3655,7 @@ TickType_t xTaskGetTickCount( void ) TickType_t xTaskGetTickCountFromISR( void ) { TickType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; /* RTOS ports that support interrupt nesting have the concept of a maximum * system call (or maximum API call) interrupt priority. Interrupts that are @@ -3664,11 +3673,11 @@ TickType_t xTaskGetTickCountFromISR( void ) * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); { xReturn = xTickCount; } - portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -4251,7 +4260,7 @@ BaseType_t xTaskIncrementTick( void ) * item at the head of the delayed list. This is the time * at which the task at the head of the delayed list must * be removed from the Blocked state. */ - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); if( xConstTickCount < xItemValue ) @@ -4496,18 +4505,18 @@ BaseType_t xTaskIncrementTick( void ) { TCB_t * pxTCB; TaskHookFunction_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; /* If xTask is NULL then set the calling task's hook. */ pxTCB = prvGetTCBFromHandle( xTask ); /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -4819,7 +4828,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) * * This function assumes that a check has already been made to ensure that * pxEventList is not empty. */ - pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ configASSERT( pxUnblockedTCB ); listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); @@ -4902,7 +4911,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, /* Remove the event list form the event flag. Interrupts do not access * event flags. */ - pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ configASSERT( pxUnblockedTCB ); listREMOVE_ITEM( pxEventListItem ); @@ -5599,6 +5608,18 @@ static void prvCheckTasksWaitingTermination( void ) } } #endif /* INCLUDE_vTaskSuspend */ + + /* Tasks can be in pending ready list and other state list at the + * same time. These tasks are in ready state no matter what state + * list the task is in. */ + taskENTER_CRITICAL(); + { + if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE ) + { + pxTaskStatus->eCurrentState = eReady; + } + } + taskEXIT_CRITICAL(); } } else @@ -5641,7 +5662,7 @@ static void prvCheckTasksWaitingTermination( void ) if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) { - listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ /* Populate an TaskStatus_t structure within the * pxTaskStatusArray array for each task that is referenced from @@ -5649,7 +5670,7 @@ static void prvCheckTasksWaitingTermination( void ) * meaning of each TaskStatus_t structure member. 
*/ do { - listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState ); uxTask++; } while( pxNextTCB != pxFirstTCB ); @@ -5847,13 +5868,13 @@ static void prvResetNextTaskUnblockTime( void ) TaskHandle_t xTaskGetCurrentTaskHandle( void ) { TaskHandle_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; - xSavedInterruptStatus = portSET_INTERRUPT_MASK(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK(); { xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; } - portCLEAR_INTERRUPT_MASK( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); return xReturn; } @@ -6315,13 +6336,13 @@ static void prvResetNextTaskUnblockTime( void ) #if ( configNUMBER_OF_CORES > 1 ) - portBASE_TYPE vTaskEnterCriticalFromISR( void ) + UBaseType_t vTaskEnterCriticalFromISR( void ) { - portBASE_TYPE xSavedInterruptStatus = 0; + UBaseType_t uxSavedInterruptStatus = 0; if( xSchedulerRunning != pdFALSE ) { - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { @@ -6335,7 +6356,7 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } - return xSavedInterruptStatus; + return uxSavedInterruptStatus; } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ @@ -6441,7 +6462,7 @@ static void prvResetNextTaskUnblockTime( void ) #if ( configNUMBER_OF_CORES > 1 ) - void vTaskExitCriticalFromISR( portBASE_TYPE xSavedInterruptStatus ) + void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) { if( xSchedulerRunning != pdFALSE ) { @@ -6456,7 +6477,7 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT() == 0U ) { portRELEASE_ISR_LOCK(); - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); } else { @@ -7087,7 +7108,7 @@ TickType_t uxTaskResetEventItemValue( void ) TCB_t * pxTCB; uint8_t ucOriginalNotifyState; BaseType_t xReturn = pdPASS; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -7112,7 +7133,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( pulPreviousNotificationValue != NULL ) { @@ -7226,7 +7247,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -7242,7 +7263,7 @@ TickType_t uxTaskResetEventItemValue( void ) { TCB_t * pxTCB; uint8_t ucOriginalNotifyState; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -7267,7 +7288,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = 
xTaskToNotify; - xSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -7337,7 +7358,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } - taskEXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); } #endif /* configUSE_TASK_NOTIFICATIONS */ @@ -7605,6 +7626,21 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, } #endif /* INCLUDE_vTaskSuspend */ } +/*-----------------------------------------------------------*/ + +#if ( portUSING_MPU_WRAPPERS == 1 ) + + xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask ) + { + TCB_t * pxTCB; + + pxTCB = prvGetTCBFromHandle( xTask ); + + return &( pxTCB->xMPUSettings ); + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ /* Code below here allows additional code to be inserted into this source file, * especially where access to file scope functions and data is needed (for example diff --git a/timers.c b/timers.c index cff986ae7ff..0028c334851 100644 --- a/timers.c +++ b/timers.c @@ -159,7 +159,7 @@ * task. Other tasks communicate with the timer service task using the * xTimerQueue queue. */ - static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; + static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) PRIVILEGED_FUNCTION; /* * Called by the timer service task to interpret and process a command it @@ -606,7 +606,7 @@ static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) { - Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ /* Remove the timer from the list of active timers. A check has already * been performed to ensure the list is not empty. */ From 7acf8fd14d24b700281ee46acb07877c9eb2b95b Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 21 Jul 2023 16:50:40 +0800 Subject: [PATCH 163/164] Merge main to SMP branch 0721 (#90) * Fix array-bounds compiler warning on gcc11+ in list.h (#580) listGET_OWNER_OF_NEXT_ENTRY computes `( pxConstList )->pxIndex->pxNext` after verifying that `( pxConstList )->pxIndex` points to `xListEnd`, which due to being a MiniListItem_t, can be shorter than a ListItem_t. Thus, `( pxConstList )->pxIndex` is a `ListItem_t *` that extends past the end of the `List_t` whose `xListEnd` it points to. This is fixed by accessing `pxNext` through a `MiniListItem_t` instead. * move the prototype for vApplicationIdleHook to task.h. 
(#600) Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update equal priority task preemption (#603) * vTaskResume and vTaskPrioritySet don't preempt equal priority task * Update vTaskResumeAll not to preempt task with equal priority * Fix in xTaskResumeFromISR * Update FreeRTOS/FreeRTOS build checks (#613) This is needed to be compatible with the refactoring done in this PR - https://github.com/FreeRTOS/FreeRTOS/pull/889 Signed-off-by: Gaurav Aggarwal Signed-off-by: Gaurav Aggarwal * Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent (#611) Allow ulTaskGetIdleRunTimeCounter and ulTaskGetIdleRunTimePercent to be used whenever configGENERATE_RUN_TIME_STATS is enabled, as this is the only requirement for these functions to work. * Fix some CMake documentation typos (#616) The quick start instructions for CMake mention the "master" git branch which has been replaced by "main" in the current repo. The main CMakeLists.txt documents how to integrate a custom port. Fix a typo in the suggested CMake code. * Added support of 64bit events. (#597) * Added support of 64bit even Signed-off-by: Cervenka Dusan * Added missing brackets Signed-off-by: Cervenka Dusan * Made proper name for tick macro. Signed-off-by: Cervenka Dusan * Improved macro evaluation Signed-off-by: Cervenka Dusan * Fixed missed port files + documentation Signed-off-by: Cervenka Dusan * Changes made on PR Signed-off-by: Cervenka Dusan * Fix macro definition. Signed-off-by: Cervenka Dusan * Formatted code with uncrustify Signed-off-by: Cervenka Dusan --------- Signed-off-by: Cervenka Dusan * Introduce portMEMORY_BARRIER for Microblaze port. (#621) The introduction of `portMEMORY_BARRIER` will ensure the places in the kernel use a barrier will work. For example, `xTaskResumeAll` has a memory barrier to ensure its correctness when compiled with optimization enabled. Without the barrier `xTaskResumeAll` can fail (e.g. start reading and writing to address 0 and/or infinite looping) when `xPendingReadyList` contains more than one task to restore. In `xTaskResumeAll` the compiler chooses to cache the `pxTCB` the first time through the loop for use in every subsequent loop. This is incorrect as the removal of `pxTCB->xEventListItem` will actually change the value of `pxTCB` if it was read again at the top of the loop. The barrier forces the compiler to read `pxTCB` again at the top of the loop. The compiler is operating correctly. The removal `pxTCB->xEventListItem` executes on a `List_t *` and `ListItem_t *`. This means that the compiler can assume that any `MiniListItem_t` values are unchanged by the loop (i.e. "strict-aliasing"). This allows the compiler to cache `pxTCB` as it is obtained via a `MiniListItem_t`. This is incorrect in this case because it is possible for a `ListItem_t *` to actually alias a `MiniListItem_t`. This is technically a "violation of aliasing rules" so we use the the barrier to disable the strict-aliasing optimization in this loop. 
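As an illustration of the fix described above: a GCC-based port typically provides such a barrier as an empty inline-asm statement with a "memory" clobber. The exact Microblaze portmacro.h definition is not quoted in this excerpt, so the macro body below is a sketch rather than the shipped code:

```
/* Sketch only: an empty inline-asm statement with a "memory" clobber.  It
 * forces the compiler to discard register-cached copies of memory operands
 * (such as pxTCB in the xTaskResumeAll() loop) and re-read them after the
 * barrier, defeating the strict-aliasing based caching described above. */
#ifndef portMEMORY_BARRIER
    #define portMEMORY_BARRIER()    __asm volatile ( "" ::: "memory" )
#endif
```

A compiler-only barrier is sufficient for this failure mode because the bug is an optimisation decision, not a hardware memory-ordering problem.
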
* Do not call exit() on MSVC Port when calling vPortEndScheduler (#624) * make port exitable * correctly set xPortRunning to False * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Update PR template to include checkbox for Unit Test related changes (#627) * Fix build failure introduced in PR #597 (#629) The PR #597 introduced a new config option configTICK_TYPE_WIDTH_IN_BITS which can be defined to one of the following: * TICK_TYPE_WIDTH_16_BITS - Tick type is 16 bit wide. * TICK_TYPE_WIDTH_32_BITS - Tick type is 32 bit wide. * TICK_TYPE_WIDTH_64_BITS - Tick type is 64 bit wide. Earlier we supported 16 and 32 bit width for tick type which was controlled using the config option configUSE_16_BIT_TICKS. The PR tried to maintain backward compatibility by honoring configUSE_16_BIT_TICKS. The backward compatibility did not work as expected though, as the macro configTICK_TYPE_WIDTH_IN_BITS was used before it was defined. This PR addresses it by ensuring that the macro configTICK_TYPE_WIDTH_IN_BITS is defined before it is used. Testing 1. configUSE_16_BIT_TICKS is defined to 0. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 2. configUSE_16_BIT_TICKS is defined to 1. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 3. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_16_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 4. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_32_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 5. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_64_BITS. ``` #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. ``` The testing was done for GCC/ARM_CM3 port which does not support 64 bit tick type. 6. Neither configUSE_16_BIT_TICKS nor configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. 
``` 7. Both configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` Related issue - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/628 Signed-off-by: Gaurav Aggarwal * Feature/fixing clang gnu compiler warnings (#620) * Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558) * Using single name definition for libraries everywhere. (#558) * Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571) * Removing compiler warnings for GNU and Clang. (#571) * Added in documentation on how to consume from a main project. Added default PORT selection for native POSIX and MINGW platforms. * Only adding freertos_config if it exists. Removing auto generation of it from a FREERTOS_CONFIG_FILE_DIRECTORY. * Fixing clang and gnu compiler warnings. * Adding in project information and how to compile for GNU/clang * Fixing compiler issue with unused variable - no need to declare variable. * Adding in compile warnings for linux builds that kernel is okay with using. * Fixing more extra-semi-stmt clang warnings. * Moving definition of hooks into header files if features are enabled. * Fixing formatting with uncrustify. * Fixing merge conflicts with main merge. * Fixing compiler errors due to merge issues and formatting. * Fixing Line feeds. * Adding 'portNORETURN' into portmacros.h. Other Updates based on PR request * Further clean-up of clang and clang-tidy issues. * Removing compiler specific pragmas from common c files. * Fixing missing lexicon entry and uncrustify formatting changes. * Resolving merge issue multiple defnitions of proto for prvIdleTask * Fixing formatting issues that are not covered by uncrustify. Use clang-tidy instead if you want this level of control. * More uncrustify formatting issues. * Fixing extra bracket in #if statement. --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * POSIX port fixes (#626) * Fix types in POSIX port Use TaskFunction_t and StackType_t as other ports do. * Fix portTICK_RATE_MICROSECONDS in POSIX port --------- Co-authored-by: Jacques GUILLOU Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Cortex-M35P: Add Cortex-M35P port (#631) * Cortex-M35P: Add Cortex-M35P port The Cortex-M35P support added to kernel. The port hasn't been validated yet with TF-M. Hence TF-M support is not included in this port. Signed-off-by: Devaraj Ranganna * Add portNORETURN to the newly added portmacro.h Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Introduced Github Status Badge for Unit Tests (#634) * Introduced Github Status Badge for Unit Tests * Github status badge to point to latest run * Github status badge UT points to latest results * Fixed URL for Github Status badge --------- Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> * Remove C99 requirement from CMake file (#633) * Remove C99 requirement from CMake file The kernel source is C89 compliant and does not need C99. 
Signed-off-by: Gaurav Aggarwal * Explicitly set C89 requirement for kernel Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal * Add Thread Local Storage (TLS) support using Picolibc functions (#343) * Pass top of stack to configINIT_TLS_BLOCK Picolibc wants to allocate the per-task TLS block within the stack segment, so it will need to modify the top of stack value. Pass the pxTopOfStack variable to make this explicit. Signed-off-by: Keith Packard * Move newlib-specific definitions to separate file This reduces the clutter in FreeRTOS.h caused by having newlib-specific macros present there. Signed-off-by: Keith Packard * Make TLS code depend only on configUSE_C_RUNTIME_TLS_SUPPORT Remove reference to configUSE_NEWLIB_REENTRANT as that only works when using newlib. configUSE_C_RUNTIME_TLS_SUPPORT is always set when configUSE_NEWLIB_REENTRANT is set, so using both was redundant in that case. Signed-off-by: Keith Packard * portable-ARC: Adapt ARC support to use generalized TLS support With generalized thread local storage (TLS) support present in the core, the two ARC ports need to have the changes to the TCB mirrored to them. Signed-off-by: Keith Packard * Add Thread Local Storage (TLS) support using Picolibc functions This patch provides definitions of the general TLS support macros in terms of the Picolibc TLS support functions. Picolibc is normally configured to use TLS internally for all variables that are intended to be task-local, so these changes are necessary for picolibc to work correctly with FreeRTOS. The picolibc helper functions rely on elements within the linker script to arrange the TLS data in memory and define some symbols. Applications wanting to use this mechanism will need changes in their linker script when migrating to picolibc. Signed-off-by: Keith Packard --------- Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * Interrupt priority assert improvements for CM3/4/7 (#602) * Interrupt priority assert improvements for CM3/4/7 In the ARM_CM3, ARM_CM4, and ARM_CM7 ports, change the assertion that `configMAX_SYSCALL_INTERRUPT_PRIORITY` is nonzero to account for the number of priority bits implemented by the hardware. Change these ports to also use the lowest priority for PendSV and SysTick, ignoring `configKERNEL_INTERRUPT_PRIORITY`. * Remove not needed configKERNEL_INTERRUPT_PRIORITY define Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal * Introduced code coverage status badge (#635) * Introduced code coverage status badge * Trying to fix the URL checker issue * Fix URL check Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * added portPOINTER_SIZE_TYPE and SIZE_MAX definition to PIC24/dsPIC port (#636) * added portPOINTER_SIZE_TYPE definition to PIC24/dsPIC port * Added SIZE_MAX definition to PIC24/dsPIC33 * Fix TLS and stack alignment when using picolibc (#637) Both the TLS block and stack must be correctly aligned when using picolibc. The architecture stack alignment is represented by the portBYTE_ALIGNMENT_MASK and the TLS block alignment is provided by the Picolibc _tls_align() inline function for Picolibc version 1.8 and above. 
* Enable building the GCC Cortex-R5 port without an FPU (#586)

* Ensure configUSE_TASK_FPU_SUPPORT option is set correctly

If the FPU of the Cortex-R5 processor is enabled, then the GCC compiler defines the macro __ARM_FP. This can be used to ensure that configUSE_TASK_FPU_SUPPORT is set accordingly.

* Enable the implementation of vPortTaskUsesFPU only if configUSE_TASK_FPU_SUPPORT is set to 1
* Remove error case in pxPortInitialiseStack

The case where configUSE_TASK_FPU_SUPPORT is 0 is now handled.

* Enable access to FPU registers only if FPU is enabled
* Make minor formatting changes
* Format ARM Cortex-R5 port
* Address review comments from @ChristosZosi
* Minor code review suggestions

Signed-off-by: Gaurav Aggarwal

---------

Signed-off-by: Gaurav Aggarwal
Co-authored-by: Christos Zosimidis
Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Co-authored-by: Gaurav Aggarwal

* Fix freertos_kernel cmake property, Posix Port (#640)

* Fix freertos_kernel cmake property, Posix Port
* Moves the `set_property()` call below the target definition in top level CMakeLists file
* Corrects billion value from `ULL` suffix (not C90 compliant) to `UL` suffix with cast to uint64_t
* Add blank line to CMakeLists.txt
* Add missing FreeRTOS+ defines

* Run kernel demos and unit tests for PR changes (#645)

* Run kernel demos and unit tests for PR changes

Kernel demos check builds multiple demos from FreeRTOS/FreeRTOS and unit tests check runs unit tests in FreeRTOS/FreeRTOS. Both of these checks currently use the main branch of FreeRTOS-Kernel. This commit updates these checks to use the changes in the PR.

Signed-off-by: Gaurav Aggarwal

* Do not specify PR SHA explicitly as that is default

Signed-off-by: Gaurav Aggarwal

* Remove explicit PR SHA from kernel checks

Signed-off-by: Gaurav Aggarwal

---------

Signed-off-by: Gaurav Aggarwal

* Add functions to get the buffers of statically created objects (#641)

Added various ...GetStaticBuffer() functions to get the buffers of statically created objects.

---------

Co-authored-by: Paul Bartell
Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com>
Co-authored-by: Gaurav Aggarwal
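As a usage illustration of the new accessors, the sketch below creates a task from static buffers and then queries them back. It assumes configSUPPORT_STATIC_ALLOCATION is 1 and uses the xTaskGetStaticBuffers() prototype as added in this release; the task body and names are placeholders:

```
#include "FreeRTOS.h"
#include "task.h"

/* Buffers handed to xTaskCreateStatic(). */
static StackType_t uxTaskStack[ configMINIMAL_STACK_SIZE ];
static StaticTask_t xTaskTCB;

static void prvExampleTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Placeholder task body. */
    }
}

void vDemoStaticBufferQuery( void )
{
    TaskHandle_t xHandle;
    StackType_t * puxStackBuffer = NULL;
    StaticTask_t * pxTaskBuffer = NULL;

    xHandle = xTaskCreateStatic( prvExampleTask, "Static", configMINIMAL_STACK_SIZE,
                                 NULL, tskIDLE_PRIORITY + 1, uxTaskStack, &xTaskTCB );

    /* Retrieve the buffers that were supplied when the task was created. */
    if( xTaskGetStaticBuffers( xHandle, &puxStackBuffer, &pxTaskBuffer ) == pdTRUE )
    {
        configASSERT( puxStackBuffer == uxTaskStack );
        configASSERT( pxTaskBuffer == &xTaskTCB );
    }
}
```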
* Cortex-M Assert when NVIC implements 8 PRIO bits (#639)

* Cortex-M Assert when NVIC implements 8 PRIO bits
* Fix CM3 ports
* Fix ARM_CM3_MPU
* Fix ARM CM3
* Fix ARM_CM4_MPU
* Fix ARM_CM4
* Fix GCC ARM_CM7
* Fix IAR ARM ports
* Uncrustify changes
* Fix MikroC_ARM_CM4F port
* Fix MikroC_ARM_CM4F port-(2)
* Fix RVDS ARM ports
* Revert changes for Tasking/ARM_CM4F port
* Revert changes for Tasking/ARM_CM4F port-(2)
* Update port.c

Fix GCC/ARM_CM4F port

* Update port.c
* update GCC\ARM_CM4F port
* update port.c
* Assert to check configMAX_SYSCALL_INTERRUPT_PRIORITY is set to higher priority
* Fix merge error: remove duplicate code
* Fix typos

---------

Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Co-authored-by: Ubuntu

* Remove C90 requirement from CMakeLists (#649)

This is needed as it is breaking projects - https://forums.freertos.org/t/freertos-gcc-cmake/16984

We will re-evaluate and accordingly add this later.

Signed-off-by: Gaurav Aggarwal

* Only add alignment padding when needed (#650)

Heap 4 and Heap 5 add some padding to ensure that the allocated blocks are always aligned to portBYTE_ALIGNMENT bytes. The code until now was adding padding always, even if the resulting block was already aligned. This commit updates the code to only add padding if the resulting block is not aligned.

Signed-off-by: Gaurav Aggarwal
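The gist of that heap change can be sketched with a small standalone helper; exampleBYTE_ALIGNMENT stands in for the real portBYTE_ALIGNMENT and the function is illustrative, not the heap_4.c/heap_5.c code itself:

```
#include <stddef.h>

#define exampleBYTE_ALIGNMENT         8U
#define exampleBYTE_ALIGNMENT_MASK    ( exampleBYTE_ALIGNMENT - 1U )

static size_t prvPadToAlignment( size_t xWantedSize )
{
    if( ( xWantedSize & exampleBYTE_ALIGNMENT_MASK ) != 0U )
    {
        /* Not aligned - round up to the next multiple of the alignment. */
        xWantedSize += ( exampleBYTE_ALIGNMENT - ( xWantedSize & exampleBYTE_ALIGNMENT_MASK ) );
    }

    /* Sizes that are already aligned are returned unchanged; previously
     * padding was added unconditionally. */
    return xWantedSize;
}
```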
* add a missing comma (#651)

* fix conversion warning (#658)

FreeRTOS-Kernel/portable/GCC/ARM_CM4F/port.c:399:41: error: conversion from 'uint32_t' {aka 'long unsigned int'} to 'uint8_t' {aka 'unsigned char'} may change value [-Werror=conversion]

Signed-off-by: Vo Trung Chi

* ARMv7M: Adjust implemented priority bit assertions (#665)

Adjust assertions related to the CMSIS __NVIC_PRIO_BITS and FreeRTOS configPRIO_BITS configuration macros such that these macros specify the minimum number of implemented priority bits supported by a config build rather than the exact number of implemented priority bits.

Related to Qemu issue #1122

* Format portmacro.h in arm CM0 ports

* portable/ARM_CM0: Add xPortIsInsideInterrupt

Add missing xPortIsInsideInterrupt function to Cortex-M0 port.

* tree-wide: Unify formatting of __cplusplus ifdefs

* Paranthesize expression-like macro (#668)

* Updated tasks.c checks for scheduler suspension (#670)

This commit updates the checks for the variable uxSchedulerSuspended in the tasks.c module to use a uniform format.

Signed-off-by: Sudeep Mohanty

* Fix cast alignment warning (#669)

* Fix cast alignment warning

Without this change, the code produces the following warning when compiled with the `-Wcast-align` flag:

```
cast increases required alignment of target type
```

Signed-off-by: Gaurav Aggarwal

* Align StackSize and StackAddress for macOS (#674)

* Armv8-M (except Cortex-M23) interrupt priority checking (#673)

* Armv8-M: Formatting changes

Signed-off-by: Devaraj Ranganna

* Armv8-M: Add support for interrupt priority check

FreeRTOS provides `FromISR` system calls which can be called directly from interrupt service routines. It is crucial that the priority of these ISRs is set to the same or a lower value (numerically higher) than that of `configMAX_SYSCALL_INTERRUPT_PRIORITY`. For more information refer to https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html.

Add a check to trigger an assert when an ISR with a priority higher (numerically lower) than `configMAX_SYSCALL_INTERRUPT_PRIORITY` calls `FromISR` system calls if the `configASSERT` macro is defined.

In addition, add a config option `configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK` to disable the interrupt priority check while running on QEMU. Based on the discussion in https://gitlab.com/qemu-project/qemu/-/issues/1122, the interrupt priority bits in QEMU do not match the real hardware, therefore the assert that checks the number of implemented bits against __NVIC_PRIO_BITS will always fail. The config option `configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the `FreeRTOSConfig.h` for QEMU targets.

Signed-off-by: Devaraj Ranganna

* Use SHPR2 for calculating interrupt priority bits

This removes the dependency on the secure software to mark the interrupt as non-secure.

Signed-off-by: Gaurav Aggarwal

---------

Signed-off-by: Devaraj Ranganna
Signed-off-by: Gaurav Aggarwal
Co-authored-by: Gaurav Aggarwal
Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
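For a QEMU target affected by the entry above, the opt-out is a single definition in the application's FreeRTOSConfig.h. The fragment below is illustrative; the configASSERT definition shown is just one common demo-style choice:

```
/* FreeRTOSConfig.h fragment for a QEMU target (illustrative). */
#define configASSERT( x )    if( ( x ) == 0 ) { taskDISABLE_INTERRUPTS(); for( ; ; ) {} }

/* QEMU does not model the implemented interrupt priority bits the way real
 * hardware does, so disable the priority-bits check described above. */
#define configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK    1
```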
* Use the extended movx instruction instead of mov (#676)

The following is from the MSP430X instruction set -

```
MOVX.W  Move source word to destination word.

The source operand is copied to the destination. The source operand is not affected. Both operands may be located in the full address space.
```

The movx instruction allows both the operands to be located in the full address space and therefore works with the large data model as well.

Signed-off-by: Gaurav Aggarwal

* Fix eTaskGetState for pending ready tasks (#679)

This commit fixes eTaskGetState so that eReady is returned for pending ready tasks.

Co-authored-by: Darian Leung

* Generates SBOM after source files are updated with release tag (#680)

* update source file with release version info before SBOM generation
* delete tag branch during cleanup

* Add back croutines by reverting PR#590 (#685)

* Add croutines to the code base
* Add croutine changes to cmake, lexicon and readme
* Add croutine file to portable cmake file
* Add back more references from PR 591

* Remove __NVIC_PRIO_BITS and configPRIO_BITS check in port (#683)

* Remove __NVIC_PRIO_BITS and configPRIO_BITS check in CM3, CM4 and ARMv8.
* Add hardware not implemented bits check. These bits should be zero.

---------

Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>

* Use UBaseType_t as interrupt mask (#689)

* Use UBaseType_t as interrupt mask
* Update GCC posix port to use UBaseType_t as interrupt mask

* Fix clang warning in croutine and stream buffer (#686)

* Fix document warning in croutine
* Fix cast-qual warning in stream buffer

* Use portTASK_FUNCTION_PROTO to replace portNORETURN (#688)

* Fix typo in check comment of configMAX_SYSCALL_INTERRUPT_PRIORITY (#690)

* Add constant type for portMAX_DELAY in port (#691)

Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>

* Update static stream buffer size check (#693)

* Use volatile size instead of sizeof directly to prevent always true/false warning

* Fix typos in comments for the AT91SAM7S port (#695)

Co-authored-by: RichardBarry <3073890+RichardBarry@users.noreply.github.com>

* Fix #697: Missing portPOINTER_SIZE_TYPE definition for ATmega port (#698)

* Remove empty expression statement compiler warning (#692)

* Add do while( 0 ) loop for empty expression statement compiler warning
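The do while( 0 ) pattern mentioned in the last entry is the usual way to keep an intentionally empty macro from producing an empty-statement warning while still behaving like a single statement. A generic sketch with an illustrative macro name:

```
/* Before: expands to nothing, so a call such as "vExampleHook();" leaves a
 * bare ";" that some compilers warn about as an empty statement. */
#define vExampleHookOld()

/* After: still does nothing, but consumes the trailing semicolon and can be
 * used safely in an if/else without braces. */
#define vExampleHook()    do { } while( 0 )
```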
* Update uxTaskGetSystemState for tasks in pending ready list (#702)

* Update uxTaskGetSystemState to sync with eTaskGetState
* Update vTaskGetInfo so that tasks in the pending ready list are reported as being in the ready state.

* Fix circular dependency in CMake project (#700)

* Fix circular dependency in cmake project

Fix for https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/687

In order for custom ports to also break the cycle, they must link against freertos_kernel_include instead of freertos_kernel.

* Simplify include path

* Memory Protection Unit (MPU) Enhancements (#705)

Memory Protection Unit (MPU) Enhancements

This commit introduces a new MPU wrapper that places additional restrictions on unprivileged tasks. The following is the list of changes introduced with the new MPU wrapper:

1. Opaque and indirectly verifiable integers for kernel object handles:
   All the kernel object handles (for example, queue handles) are now opaque integers. Previously object handles were raw pointers.

2. Saving the task context in Task Control Block (TCB):
   When a task is swapped out by the scheduler, the task's context is now saved in its TCB. Previously the task's context was saved on its stack.

3. Execute system calls on a separate privileged only stack:
   FreeRTOS system calls, which execute with elevated privilege, now use a separate privileged only stack. Previously system calls used the calling task's stack. The application writer can control the size of the system call stack using the new configSYSTEM_CALL_STACK_SIZE config macro.

4. Memory bounds checks:
   FreeRTOS system calls which accept a pointer and de-reference it now verify that the calling task has the required permissions to access the memory location referenced by the pointer.

5. System call restrictions:
   The following system calls are no longer available to unprivileged tasks:
   - vQueueDelete
   - xQueueCreateMutex
   - xQueueCreateMutexStatic
   - xQueueCreateCountingSemaphore
   - xQueueCreateCountingSemaphoreStatic
   - xQueueGenericCreate
   - xQueueGenericCreateStatic
   - xQueueCreateSet
   - xQueueRemoveFromSet
   - xQueueGenericReset
   - xTaskCreate
   - xTaskCreateStatic
   - vTaskDelete
   - vTaskPrioritySet
   - vTaskSuspendAll
   - xTaskResumeAll
   - xTaskGetHandle
   - xTaskCallApplicationTaskHook
   - vTaskList
   - vTaskGetRunTimeStats
   - xTaskCatchUpTicks
   - xEventGroupCreate
   - xEventGroupCreateStatic
   - vEventGroupDelete
   - xStreamBufferGenericCreate
   - xStreamBufferGenericCreateStatic
   - vStreamBufferDelete
   - xStreamBufferReset

   Also, an unprivileged task can no longer use vTaskSuspend to suspend any task other than itself.

We thank the following people for their inputs in these enhancements:
- David Reiss of Meta Platforms, Inc.
- Lan Luo, Xinhui Shao, Yumeng Wei, Zixia Liu, Huaiyu Yan and Zhen Ling of School of Computer Science and Engineering, Southeast University, China.
- Xinwen Fu of Department of Computer Science, University of Massachusetts Lowell, USA.
- Yuequi Chen, Zicheng Wang, Minghao Lin of University of Colorado Boulder, USA.
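Item 3 above adds configSYSTEM_CALL_STACK_SIZE. A minimal FreeRTOSConfig.h fragment for a build that uses the new wrapper might look like the following; the numeric value is a placeholder and the exact units and minimum size should be taken from the port documentation, not from this sketch:

```
/* FreeRTOSConfig.h fragment (illustrative values only). */

/* Use the new MPU wrapper described above rather than the pre-existing one. */
#define configUSE_MPU_WRAPPERS_V1       0

/* Depth of the separate privileged stack used while a system call executes. */
#define configSYSTEM_CALL_STACK_SIZE    128
```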
* Update History for Version 10.6.0 (#706)

Signed-off-by: kar-rahul-aws

* Fixed compile options polluting project (#694)

* Fixed compile options polluting project

Moved add_library higher

* Apply suggestions from code review

Co-authored-by: Paul Bartell

* fixed cmakelists keeping in mind the suggestions

---------

Co-authored-by: Paul Bartell
Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>

* Fix the comments in the CM3 and CM4 MPU Ports about the MPU Region numbers being loaded (#707)

Co-authored-by: Soren Ptak
Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>

* Update xSemaphoreGetStaticBuffer prototype in comment (#704)

Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>

* Correct the misspelled name (#708)

Signed-off-by: Gaurav Aggarwal

---------

Signed-off-by: Gaurav Aggarwal
Signed-off-by: Cervenka Dusan
Signed-off-by: Devaraj Ranganna
Signed-off-by: Keith Packard
Signed-off-by: Vo Trung Chi
Signed-off-by: Sudeep Mohanty
Signed-off-by: kar-rahul-aws
Co-authored-by: Archit Gupta <71798289+archigup@users.noreply.github.com>
Co-authored-by: tcpluess
Co-authored-by: pluess
Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Co-authored-by: Chris Copeland
Co-authored-by: David J. Fiddes <35607151+davefiddes@users.noreply.github.com>
Co-authored-by: Dusan Cervenka
Co-authored-by: bbain <16752579+bbain@users.noreply.github.com>
Co-authored-by: Ju1He1 <93189163+Ju1He1@users.noreply.github.com>
Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Co-authored-by: phelter
Co-authored-by: jacky309
Co-authored-by: Jacques GUILLOU
Co-authored-by: Devaraj Ranganna
Co-authored-by: Gaurav Aggarwal
Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com>
Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com>
Co-authored-by: Keith Packard
Co-authored-by: Keith Packard
Co-authored-by: Joseph Julicher
Co-authored-by: Paul Bartell
Co-authored-by: Christos Zosimidis
Co-authored-by: Kody Stribrny <89810515+kstribrnAmzn@users.noreply.github.com>
Co-authored-by: Holden
Co-authored-by: Darian <32921628+Dazza0@users.noreply.github.com>
Co-authored-by: Ubuntu
Co-authored-by: Nicolas
Co-authored-by: Vo Trung Chi
Co-authored-by: Sudeep Mohanty <91244425+sudeep-mohanty@users.noreply.github.com>
Co-authored-by: Monika Singh <108652024+moninom1@users.noreply.github.com>
Co-authored-by: Darian Leung
Co-authored-by: Tony Josi
Co-authored-by: Evgeny Ermakov <22344340+unspecd@users.noreply.github.com>
Co-authored-by: RichardBarry <3073890+RichardBarry@users.noreply.github.com>
Co-authored-by: Joris Putcuyps
Co-authored-by: Patrick Cook <114708437+cookpate@users.noreply.github.com>
Co-authored-by: Mr. Jake
Co-authored-by: Paul Bartell
Co-authored-by: Soren Ptak
Co-authored-by: Soren Ptak

From 9be537ef309d016d006d61f9f54a7f9b411d02ad Mon Sep 17 00:00:00 2001
From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com>
Date: Mon, 24 Jul 2023 16:18:31 +0800
Subject: [PATCH 164/164] Move default configNUMBER_OF_CORES definition
 forward in FreeRTOSConfig.h (#88)

* Move default configNUMBER_OF_CORES definition forward in FreeRTOSConfig.h.

---
 include/FreeRTOS.h                                 | 9 +++++----
 portable/ThirdParty/GCC/RP2040/include/portmacro.h | 3 ---
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h
index a9fa182934a..5016ebf5eee 100644
--- a/include/FreeRTOS.h
+++ b/include/FreeRTOS.h
@@ -86,6 +86,11 @@
     #define configUSE_MPU_WRAPPERS_V1    0
 #endif
 
+/* Set default value of configNUMBER_OF_CORES to 1 to use single core FreeRTOS. */
+#ifndef configNUMBER_OF_CORES
+    #define configNUMBER_OF_CORES    1
+#endif
+
 /* Basic FreeRTOS definitions. */
 #include "projdefs.h"
 
@@ -363,10 +368,6 @@
     #define portSOFTWARE_BARRIER()
 #endif
 
-#ifndef configNUMBER_OF_CORES
-    #define configNUMBER_OF_CORES    1
-#endif
-
 #ifndef configRUN_MULTIPLE_PRIORITIES
     #define configRUN_MULTIPLE_PRIORITIES    0
 #endif
diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h
index fffd8c66c2a..655b8ec7441 100644
--- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h
+++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h
@@ -114,9 +114,6 @@
 
 /* Multi-core */
     #define portMAX_CORE_COUNT    2
-    #ifndef configNUMBER_OF_CORES
-        #define configNUMBER_OF_CORES    2
-    #endif
 
 /* Check validity of number of cores specified in config */
     #if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES )